diff --git a/.github/actions/constellation_create/aws-logs.sh b/.github/actions/constellation_create/aws-logs.sh index 298e91e9d..40cccf614 100755 --- a/.github/actions/constellation_create/aws-logs.sh +++ b/.github/actions/constellation_create/aws-logs.sh @@ -2,50 +2,50 @@ # Usage: ./aws-logs.sh -controlAutoscalingGroup=$(\ - terraform show -json | \ +controlAutoscalingGroup=$( + terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | - .resources[0].values.name' \ + .resources[0].values.name' ) -workerAutoscalingGroup=$(\ - terraform show -json | \ +workerAutoscalingGroup=$( + terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_worker_nodes") | - .resources[0].values.name' \ + .resources[0].values.name' ) -controlInstances=$(\ - aws autoscaling describe-auto-scaling-groups \ - --region "${1}" \ - --no-paginate \ - --output json \ - --auto-scaling-group-names "${controlAutoscalingGroup}" | \ - jq -r '.AutoScalingGroups[0].Instances[].InstanceId' \ +controlInstances=$( + aws autoscaling describe-auto-scaling-groups \ + --region "${1}" \ + --no-paginate \ + --output json \ + --auto-scaling-group-names "${controlAutoscalingGroup}" | + jq -r '.AutoScalingGroups[0].Instances[].InstanceId' ) -workerInstances=$(\ - aws autoscaling describe-auto-scaling-groups \ - --region "${1}" \ - --no-paginate \ - --output json \ - --auto-scaling-group-names "${workerAutoscalingGroup}" | \ - jq -r '.AutoScalingGroups[0].Instances[].InstanceId' \ +workerInstances=$( + aws autoscaling describe-auto-scaling-groups \ + --region "${1}" \ + --no-paginate \ + --output json \ + --auto-scaling-group-names "${workerAutoscalingGroup}" | + jq -r '.AutoScalingGroups[0].Instances[].InstanceId' ) echo "Fetching logs from control planes: ${controlInstances}" for instance in ${controlInstances}; do - printf "Fetching for %s\n" "${instance}" - aws ec2 get-console-output 
--region "${1}" --instance-id "${instance}" | \ - jq -r .'Output' | \ - tail -n +2 > control-plane-"${instance}".log + printf "Fetching for %s\n" "${instance}" + aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | + jq -r .'Output' | + tail -n +2 > control-plane-"${instance}".log done echo "Fetching logs from worker nodes: ${workerInstances}" for instance in ${workerInstances}; do - printf "Fetching for %s\n" "${instance}" - aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | \ - jq -r .'Output' | \ - tail -n +2 > worker-"${instance}".log + printf "Fetching for %s\n" "${instance}" + aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | + jq -r .'Output' | + tail -n +2 > worker-"${instance}".log done diff --git a/.github/actions/constellation_create/az-logs.sh b/.github/actions/constellation_create/az-logs.sh index 27a6bf35f..118e99822 100755 --- a/.github/actions/constellation_create/az-logs.sh +++ b/.github/actions/constellation_create/az-logs.sh @@ -8,17 +8,28 @@ printf "Fetching logs of instances in resource group %s\n" "${1}" # get list of all scale sets scalesetsjson=$(az vmss list --resource-group "${1}" -o json) scalesetslist=$(echo "${scalesetsjson}" | jq -r '.[] | .name') -subscription=$(az account show | jq -r .id) +subscription=$(az account show | jq -r .id) printf "Checking scalesets %s\n" "${scalesetslist}" for scaleset in ${scalesetslist}; do - instanceids=$(az vmss list-instances --resource-group "${1}" --name "${scaleset}" -o json | jq -r '.[] | .instanceId') - printf "Checking instance IDs %s\n" "${instanceids}" - for instanceid in ${instanceids}; do - bloburi=$(az rest --method post --url https://management.azure.com/subscriptions/"${subscription}"/resourceGroups/"${1}"/providers/Microsoft.Compute/virtualMachineScaleSets/"${scaleset}"/virtualmachines/"${instanceid}"/retrieveBootDiagnosticsData?api-version=2022-03-01 | jq '.serialConsoleLogBlobUri' -r) - sleep 4 - curl -sL -o 
"./${scaleset}-${instanceid}.log" "${bloburi}" - realpath "./${scaleset}-${instanceid}.log" - done + instanceids=$( + az vmss list-instances \ + --resource-group "${1}" \ + --name "${scaleset}" \ + -o json | + jq -r '.[] | .instanceId' + ) + printf "Checking instance IDs %s\n" "${instanceids}" + for instanceid in ${instanceids}; do + bloburi=$( + az rest \ + --method post \ + --url https://management.azure.com/subscriptions/"${subscription}"/resourceGroups/"${1}"/providers/Microsoft.Compute/virtualMachineScaleSets/"${scaleset}"/virtualmachines/"${instanceid}"/retrieveBootDiagnosticsData?api-version=2022-03-01 | + jq '.serialConsoleLogBlobUri' -r + ) + sleep 4 + curl -sL -o "./${scaleset}-${instanceid}.log" "${bloburi}" + realpath "./${scaleset}-${instanceid}.log" + done done diff --git a/.github/actions/constellation_create/gcp-logs.sh b/.github/actions/constellation_create/gcp-logs.sh index 951ea580f..735a19420 100755 --- a/.github/actions/constellation_create/gcp-logs.sh +++ b/.github/actions/constellation_create/gcp-logs.sh @@ -3,9 +3,9 @@ set -euo pipefail shopt -s inherit_errexit -controlInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name' ) +controlInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name') workerInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_worker") | .resources[0].values.base_instance_name') -zone=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.zone' ) +zone=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | 
.resources[0].values.zone') controlInstanceGroup=${controlInstanceGroup##*/} workerInstanceGroupShort=${workerInstanceGroup##*/} diff --git a/hack/azure-jump-host/jump-host-create b/hack/azure-jump-host/jump-host-create index 5c3e27d22..0fa13d1bd 100755 --- a/hack/azure-jump-host/jump-host-create +++ b/hack/azure-jump-host/jump-host-create @@ -3,7 +3,7 @@ set -euo pipefail shopt -s inherit_errexit -SCRIPTDIR="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")"; )"; +SCRIPTDIR="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")")" RG=$(jq -r .azureresourcegroup constellation-state.json) SUBNET=$(jq -r .azuresubnet constellation-state.json) VNET=${SUBNET%"/subnets/nodeNetwork"} @@ -12,16 +12,22 @@ DEPLOYMENT_NAME=jump-host VM_NAME=jump-host az deployment group create \ - -o none \ - --name "${DEPLOYMENT_NAME}" \ - --resource-group "${RG}" \ - --template-file "${SCRIPTDIR}/template.json" \ - --parameters "@${SCRIPTDIR}/parameters.json" \ - --parameters "{ \"virtualNetworkId\": { \"value\": \"${VNET}\" } }" \ - --parameters "{ \"subnetRef\": { \"value\": \"${SUBNET}\" } }" \ - --parameters "{ \"adminPublicKey\": { \"value\": \"${PUBKEY}\" } }" + -o none \ + --name "${DEPLOYMENT_NAME}" \ + --resource-group "${RG}" \ + --template-file "${SCRIPTDIR}/template.json" \ + --parameters "@${SCRIPTDIR}/parameters.json" \ + --parameters "{ \"virtualNetworkId\": { \"value\": \"${VNET}\" } }" \ + --parameters "{ \"subnetRef\": { \"value\": \"${SUBNET}\" } }" \ + --parameters "{ \"adminPublicKey\": { \"value\": \"${PUBKEY}\" } }" az deployment group wait --created --name "${DEPLOYMENT_NAME}" --resource-group "${RG}" -PUBIP=$(az vm list-ip-addresses --resource-group "${RG}" --name "${VM_NAME}" --query "[].virtualMachine.network.publicIpAddresses[0].ipAddress" --output tsv) +PUBIP=$( + az vm list-ip-addresses \ + --resource-group "${RG}" \ + --name "${VM_NAME}" \ + --query "[].virtualMachine.network.publicIpAddresses[0].ipAddress" \ + --output tsv +) echo "Jump host created. 
Cleanup by deleteing the resource group." echo "Connect to the jump host with the following command:" echo -e "ssh azureuser@${PUBIP}\n" diff --git a/hack/check-licenses.sh b/hack/check-licenses.sh index 1fb949d6f..52e8ea573 100755 --- a/hack/check-licenses.sh +++ b/hack/check-licenses.sh @@ -13,44 +13,44 @@ not_allowed() { go mod download go-licenses csv ./... | { -while read -r line; do + while read -r line; do - pkg=${line%%,*} - lic=${line##*,} + pkg=${line%%,*} + lic=${line##*,} - case ${lic} in - Apache-2.0|BSD-2-Clause|BSD-3-Clause|ISC|MIT) - ;; + case ${lic} in + Apache-2.0 | BSD-2-Clause | BSD-3-Clause | ISC | MIT) ;; - MPL-2.0) + \ + MPL-2.0) case ${pkg} in - github.com/talos-systems/talos/pkg/machinery/config/encoder) - ;; - github.com/letsencrypt/boulder) - ;; - github.com/hashicorp/*) - ;; - *) - not_allowed - ;; + github.com/talos-systems/talos/pkg/machinery/config/encoder) ;; + + github.com/letsencrypt/boulder) ;; + + github.com/hashicorp/*) ;; + + *) + not_allowed + ;; esac ;; AGPL-3.0) case ${pkg} in - github.com/edgelesssys/constellation/v2) - ;; - *) - not_allowed - ;; + github.com/edgelesssys/constellation/v2) ;; + + *) + not_allowed + ;; esac ;; Unknown) case ${pkg} in - *) - not_allowed - ;; + *) + not_allowed + ;; esac ;; @@ -58,8 +58,8 @@ while read -r line; do echo "unknown license: ${line}" err=1 ;; - esac + esac -done -exit "${err}" + done + exit "${err}" } diff --git a/hack/fetch-broken-e2e/fetch.sh b/hack/fetch-broken-e2e/fetch.sh index 3915e532c..e9e5d7285 100755 --- a/hack/fetch-broken-e2e/fetch.sh +++ b/hack/fetch-broken-e2e/fetch.sh @@ -3,20 +3,36 @@ set -euo pipefail shopt -s inherit_errexit -LATEST_AZURE_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test Azure' --json databaseId -q '.[].databaseId') +LATEST_AZURE_RUNS=$( + gh run list \ + -R edgelesssys/constellation \ + -w 'e2e Test Azure' \ + --json databaseId \ + -q '.[].databaseId' +) echo "${LATEST_AZURE_RUNS}" -for RUN_ID in ${LATEST_AZURE_RUNS} -do - # Might fail, 
because no state was written, because e2e pipeline failed early - # Or, because state was downloaded by earlier run of this script - gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D azure/"${RUN_ID}" || true +for RUN_ID in ${LATEST_AZURE_RUNS}; do + # Might fail, because no state was written, because e2e pipeline failed early + # Or, because state was downloaded by earlier run of this script + gh run download "${RUN_ID}" \ + -R edgelesssys/constellation \ + -n constellation-state.json \ + -D azure/"${RUN_ID}" || true done -LATEST_GCP_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test GCP' --json databaseId -q '.[].databaseId') +LATEST_GCP_RUNS=$( + gh run list \ + -R edgelesssys/constellation \ + -w 'e2e Test GCP' \ + --json databaseId \ + -q '.[].databaseId' +) echo "${LATEST_GCP_RUNS}" -for RUN_ID in ${LATEST_GCP_RUNS} -do - # Might fail, because no state was written, because e2e pipeline failed early - # Or, because state was downloaded by earlier run of this script - gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D gcp/"${RUN_ID}" || true +for RUN_ID in ${LATEST_GCP_RUNS}; do + # Might fail, because no state was written, because e2e pipeline failed early + # Or, because state was downloaded by earlier run of this script + gh run download "${RUN_ID}" \ + -R edgelesssys/constellation \ + -n constellation-state.json \ + -D gcp/"${RUN_ID}" || true done diff --git a/hack/fetch-broken-e2e/find.sh b/hack/fetch-broken-e2e/find.sh index 41ff88637..740358007 100755 --- a/hack/fetch-broken-e2e/find.sh +++ b/hack/fetch-broken-e2e/find.sh @@ -4,11 +4,10 @@ set -euo pipefail shopt -s inherit_errexit TO_DELETE=$(grep -lr "\"uid\": \"${1}\"" . || true) -if [[ -z "${TO_DELETE}" ]] -then - printf "Unable to find '%s'\n" "${1}" +if [[ -z ${TO_DELETE} ]]; then + printf "Unable to find '%s'\n" "${1}" else - printf "Statefile found. 
You should run:\n\n" - printf "cd %s\n" "${TO_DELETE}" - printf "constellation terminate --yes\n\n" + printf "Statefile found. You should run:\n\n" + printf "cd %s\n" "${TO_DELETE}" + printf "constellation terminate --yes\n\n" fi diff --git a/hack/importAzure.sh b/hack/importAzure.sh index ee4bf3271..f89e383bf 100755 --- a/hack/importAzure.sh +++ b/hack/importAzure.sh @@ -18,23 +18,20 @@ set -euo pipefail shopt -s inherit_errexit # Required tools -if ! command -v az &> /dev/null -then - echo "az CLI could not be found" - echo "Please instal it from: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli" - exit +if ! command -v az &> /dev/null; then + echo "az CLI could not be found" + echo "Please instal it from: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli" + exit fi -if ! command -v azcopy &> /dev/null -then - echo "azcopy could not be found" - echo "Please instal it from: https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-v10" - exit +if ! command -v azcopy &> /dev/null; then + echo "azcopy could not be found" + echo "Please instal it from: https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-v10" + exit fi -if ! command -v jq &> /dev/null -then - echo "jq could not be found" - echo "Please instal it from: https://github.com/stedolan/jq" - exit +if ! command -v jq &> /dev/null; then + echo "jq could not be found" + echo "Please instal it from: https://github.com/stedolan/jq" + exit fi AZURE_IMAGE_FILE="${AZURE_IMAGE_FILE:-$(pwd)/abcd}" @@ -47,17 +44,16 @@ AZURE_IMAGE_DEFINITION="${AZURE_IMAGE_DEFINITION:-constellation}" AZURE_SKU="${AZURE_SKU:-constellation}" AZURE_SECURITY_TYPE="${AZURE_SECURITY_TYPE:-TrustedLaunch}" -if [[ -z "${AZURE_RESOURCE_GROUP_NAME}" ]]; then +if [[ -z ${AZURE_RESOURCE_GROUP_NAME} ]]; then echo "Please provide a value for AZURE_RESOURCE_GROUP_NAME." 
exit 1 fi -if [[ -z "${AZURE_IMAGE_VERSION}" ]]; then +if [[ -z ${AZURE_IMAGE_VERSION} ]]; then echo "Please provide a value for AZURE_IMAGE_VERSION of pattern .." exit 1 fi - echo "Using following settings:" echo "AZURE_REGION=${AZURE_REGION}" echo "AZURE_RESOURCE_GROUP_NAME=${AZURE_RESOURCE_GROUP_NAME}" @@ -74,9 +70,15 @@ echo "" read -r -p "Continue (y/n)?" choice case "${choice}" in - y|Y ) echo "Starting import...";; - n|N ) echo "Abort!"; exit 1;; - * ) echo "invalid"; exit 1;; +y | Y) echo "Starting import..." ;; +n | N) + echo "Abort!" + exit 1 + ;; +*) + echo "invalid" + exit 1 + ;; esac echo "Preparing to upload '${AZURE_IMAGE_FILE} to Azure." @@ -97,20 +99,22 @@ az disk create \ echo "Waiting for disk to be created." az disk wait --created -n "${AZURE_IMAGE_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" echo "Retrieving disk ID." -AZURE_DISK_ID=$(az disk list \ +AZURE_DISK_ID=$( + az disk list \ --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \ - --output json \ - | jq -r \ + --output json | + jq -r ) echo "Disk ID is ${AZURE_DISK_ID}" echo "Generating SAS URL for authorized upload." -AZURE_SAS_URL=$(az disk grant-access \ +AZURE_SAS_URL=$( + az disk grant-access \ -n "${AZURE_IMAGE_NAME}" \ -g "${AZURE_RESOURCE_GROUP_NAME}" \ --access-level Write \ - --duration-in-seconds 86400 \ - | jq -r .accessSas \ + --duration-in-seconds 86400 | + jq -r .accessSas ) echo "Uploading image file to Azure disk." azcopy copy "${AZURE_IMAGE_FILE}" "${AZURE_SAS_URL}" --blob-type PageBlob @@ -143,9 +147,10 @@ az sig image-definition create \ --hyper-v-generation V2 \ --features SecurityType="${AZURE_SECURITY_TYPE}" echo "Retrieving temporary image ID." -AZURE_IMAGE_ID=$(az image list \ +AZURE_IMAGE_ID=$( + az image list \ --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \ - --output json | jq -r \ + --output json | jq -r ) echo "Creating final image version." 
@@ -163,13 +168,14 @@ echo "Cleaning up ephemeral resources." az image delete --ids "${AZURE_IMAGE_ID}" az disk delete -y --ids "${AZURE_DISK_ID}" -IMAGE_VERSION=$(az sig image-version show \ - --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ - --gallery-name "${AZURE_GALLERY_NAME}" \ - --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ - --gallery-image-version "${AZURE_IMAGE_VERSION}" \ - -o tsv \ - --query id \ +IMAGE_VERSION=$( + az sig image-version show \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ + --gallery-image-version "${AZURE_IMAGE_VERSION}" \ + -o tsv \ + --query id ) echo "Image ID is ${IMAGE_VERSION}" diff --git a/image/measured-boot/measure_util.sh b/image/measured-boot/measure_util.sh index 10f084b4a..bfb704838 100644 --- a/image/measured-boot/measure_util.sh +++ b/image/measured-boot/measure_util.sh @@ -9,24 +9,27 @@ set -euo pipefail shopt -s inherit_errexit pcr_extend() { - local CURRENT_PCR="$1" - local EXTEND_WITH="$2" - local HASH_FUNCTION="$3" - ( echo -n "${CURRENT_PCR}" | xxd -r -p ; echo -n "${EXTEND_WITH}" | xxd -r -p; ) | ${HASH_FUNCTION} | cut -d " " -f 1 + local CURRENT_PCR="$1" + local EXTEND_WITH="$2" + local HASH_FUNCTION="$3" + ( + echo -n "${CURRENT_PCR}" | xxd -r -p + echo -n "${EXTEND_WITH}" | xxd -r -p + ) | ${HASH_FUNCTION} | cut -d " " -f 1 } -extract () { - local image="$1" - local path="$2" - local output="$3" - sudo systemd-dissect --copy-from "${image}" "${path}" "${output}" +extract() { + local image="$1" + local path="$2" + local output="$3" + sudo systemd-dissect --copy-from "${image}" "${path}" "${output}" } -mktempdir () { - mktemp -d +mktempdir() { + mktemp -d } -cleanup () { - local dir="$1" - rm -rf "${dir}" +cleanup() { + local dir="$1" + rm -rf "${dir}" } diff --git a/image/measured-boot/precalculate_pcr_4.sh b/image/measured-boot/precalculate_pcr_4.sh index 2253be4aa..3041a38c1 100755 --- 
a/image/measured-boot/precalculate_pcr_4.sh +++ b/image/measured-boot/precalculate_pcr_4.sh @@ -13,14 +13,14 @@ source "$(dirname "$0")/measure_util.sh" ev_efi_action_sha256=3d6772b4f84ed47595d72a2c4c5ffd15f5bb72c7507fe26f2aaee2c69d5633ba ev_efi_separator_sha256=df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119 -authentihash () { - local path="$1" - "$(dirname "$0")/extract_authentihash.py" "${path}" +authentihash() { + local path="$1" + "$(dirname "$0")/extract_authentihash.py" "${path}" } -write_output () { - local out="$1" - cat > "${out}" < "${out}" << EOF { "pcr4": "${expected_pcr_4}", "efistages": [ @@ -63,9 +63,9 @@ expected_pcr_4=$(pcr_extend "${expected_pcr_4}" "${sd_boot_authentihash}" "sha25 expected_pcr_4=$(pcr_extend "${expected_pcr_4}" "${uki_authentihash}" "sha256sum") echo "Authentihashes:" -echo "Stage 1 – shim: ${shim_authentihash}" -echo "Stage 2 – sd-boot: ${sd_boot_authentihash}" -echo "Stage 3 – Unified Kernel Image (UKI): ${uki_authentihash}" +echo "Stage 1 - shim: ${shim_authentihash}" +echo "Stage 2 - sd-boot: ${sd_boot_authentihash}" +echo "Stage 3 - Unified Kernel Image (UKI): ${uki_authentihash}" echo "" echo "Expected PCR[4]: ${expected_pcr_4}" echo "" diff --git a/image/measured-boot/precalculate_pcr_8.sh b/image/measured-boot/precalculate_pcr_8.sh index ad395cd82..8f8a8c9a7 100755 --- a/image/measured-boot/precalculate_pcr_8.sh +++ b/image/measured-boot/precalculate_pcr_8.sh @@ -13,26 +13,26 @@ set -euo pipefail shopt -s inherit_errexit source "$(dirname "$0")/measure_util.sh" -get_cmdline_from_uki () { - local uki="$1" - local output="$2" - objcopy -O binary --only-section=.cmdline "${uki}" "${output}" +get_cmdline_from_uki() { + local uki="$1" + local output="$2" + objcopy -O binary --only-section=.cmdline "${uki}" "${output}" } -cmdline_measure () { - local path="$1" - local tmp - tmp=$(mktemp) - # convert to utf-16le and add a null terminator - iconv -f utf-8 -t utf-16le "${path}" -o "${tmp}" - truncate -s +2 
"${tmp}" - sha256sum "${tmp}" | cut -d " " -f 1 - rm "${tmp}" +cmdline_measure() { + local path="$1" + local tmp + tmp=$(mktemp) + # convert to utf-16le and add a null terminator + iconv -f utf-8 -t utf-16le "${path}" -o "${tmp}" + truncate -s +2 "${tmp}" + sha256sum "${tmp}" | cut -d " " -f 1 + rm "${tmp}" } -write_output () { - local out="$1" - cat > "${out}" < "${out}" << EOF { "pcr8": "${expected_pcr_8}", "cmdline": "${cmdline}" @@ -58,10 +58,10 @@ cleanup "${DIR}" expected_pcr_8=0000000000000000000000000000000000000000000000000000000000000000 expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum") -if [[ "${CSP}" == "azure" ]]; then - # Azure displays the boot menu - # triggering an extra measurement of the kernel command line. - expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum") +if [[ ${CSP} == "azure" ]]; then + # Azure displays the boot menu + # triggering an extra measurement of the kernel command line. + expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum") fi echo "Kernel commandline: ${cmdline}" diff --git a/image/measured-boot/precalculate_pcr_9.sh b/image/measured-boot/precalculate_pcr_9.sh index df4ff9078..d2ceb6fcc 100755 --- a/image/measured-boot/precalculate_pcr_9.sh +++ b/image/measured-boot/precalculate_pcr_9.sh @@ -12,21 +12,20 @@ shopt -s inherit_errexit source "$(dirname "$0")/measure_util.sh" -get_initrd_from_uki () { - local uki="$1" - local output="$2" - objcopy -O binary --only-section=.initrd "${uki}" "${output}" +get_initrd_from_uki() { + local uki="$1" + local output="$2" + objcopy -O binary --only-section=.initrd "${uki}" "${output}" } -initrd_measure () { - local path="$1" - sha256sum "${path}" | cut -d " " -f 1 +initrd_measure() { + local path="$1" + sha256sum "${path}" | cut -d " " -f 1 } - -write_output () { - local out="$1" - cat > "${out}" < "${out}" << EOF { "pcr9": "${expected_pcr_9}", "initrd": "${initrd_hash}" diff --git 
a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh index d77dbf460..c2f2ad0c6 100755 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh @@ -5,33 +5,31 @@ set -euo pipefail shopt -s inherit_errexit attempts=1 -until [[ "${attempts}" -gt 5 ]] -do - echo "obtaining goal state - attempt ${attempts}" - goalstate=$(curl --fail -v -X 'GET' -H "x-ms-agent-name: azure-vm-register" \ - -H "Content-Type: text/xml;charset=utf-8" \ - -H "x-ms-version: 2012-11-30" \ - "http://168.63.129.16/machine/?comp=goalstate") - if [[ $? -eq 0 ]] - then - echo "successfully retrieved goal state" - retrieved_goal_state=true - break - fi - sleep 5 - attempts=$((attempts+1)) +until [[ ${attempts} -gt 5 ]]; do + echo "obtaining goal state - attempt ${attempts}" + goalstate=$(curl --fail -v -X 'GET' -H "x-ms-agent-name: azure-vm-register" \ + -H "Content-Type: text/xml;charset=utf-8" \ + -H "x-ms-version: 2012-11-30" \ + "http://168.63.129.16/machine/?comp=goalstate") + if [[ $? 
-eq 0 ]]; then + echo "successfully retrieved goal state" + retrieved_goal_state=true + break + fi + sleep 5 + attempts=$((attempts + 1)) done -if [[ "${retrieved_goal_state}" != "true" ]] -then - echo "failed to obtain goal state - cannot register this VM" - exit 1 +if [[ ${retrieved_goal_state} != "true" ]]; then + echo "failed to obtain goal state - cannot register this VM" + exit 1 fi container_id=$(grep ContainerId <<< "${goalstate}" | sed 's/\s*<\/*ContainerId>//g' | sed 's/\r$//') instance_id=$(grep InstanceId <<< "${goalstate}" | sed 's/\s*<\/*InstanceId>//g' | sed 's/\r$//') -ready_doc=$(cat << EOF +ready_doc=$( + cat << EOF 1 @@ -51,18 +49,16 @@ EOF ) attempts=1 -until [[ "${attempts}" -gt 5 ]] -do - echo "registering with Azure - attempt ${attempts}" - curl --fail -v -X 'POST' -H "x-ms-agent-name: azure-vm-register" \ - -H "Content-Type: text/xml;charset=utf-8" \ - -H "x-ms-version: 2012-11-30" \ - -d "${ready_doc}" \ - "http://168.63.129.16/machine?comp=health" - if [[ $? -eq 0 ]] - then - echo "successfully register with Azure" - break - fi - sleep 5 # sleep to prevent throttling from wire server +until [[ ${attempts} -gt 5 ]]; do + echo "registering with Azure - attempt ${attempts}" + curl --fail -v -X 'POST' -H "x-ms-agent-name: azure-vm-register" \ + -H "Content-Type: text/xml;charset=utf-8" \ + -H "x-ms-version: 2012-11-30" \ + -d "${ready_doc}" \ + "http://168.63.129.16/machine?comp=health" + if [[ $? 
-eq 0 ]]; then + echo "successfully register with Azure" + break + fi + sleep 5 # sleep to prevent throttling from wire server done diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh index 064218bba..f19a22a0e 100755 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh @@ -7,27 +7,29 @@ set -euo pipefail shopt -s inherit_errexit depends() { - echo systemd + echo systemd } install_and_enable_unit() { - unit="$1"; shift - target="$1"; shift - inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" - mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" - ln_r "${systemdsystemunitdir}/${unit}" \ - "${systemdsystemconfdir}/${target}.wants/${unit}" + unit="$1" + shift + target="$1" + shift + inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" + mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" + ln_r "${systemdsystemunitdir}/${unit}" \ + "${systemdsystemconfdir}/${target}.wants/${unit}" } install() { - inst_multiple \ - bash \ - curl \ - grep \ - sed + inst_multiple \ + bash \ + curl \ + grep \ + sed - inst_script "${moddir}/azure-provisioning.sh" \ - "/usr/local/bin/azure-provisioning" - install_and_enable_unit "azure-provisioning.service" \ - "basic.target" + inst_script "${moddir}/azure-provisioning.sh" \ + "/usr/local/bin/azure-provisioning" + install_and_enable_unit "azure-provisioning.service" \ + "basic.target" } diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh index 2586d4c49..9cef6dbb5 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh +++ 
b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh @@ -10,20 +10,18 @@ AWS_STATE_DISK_DEVICENAME="sdb" AWS_STATE_DISK_SYMLINK="/dev/${AWS_STATE_DISK_DEVICENAME}" # hack: aws nvme udev rules are never executed. Create symlinks for the nvme devices manually. -while [[ ! -L "${AWS_STATE_DISK_SYMLINK}" ]] -do - for nvmedisk in /dev/nvme*n1 - do - linkname=$(nvme amzn id-ctrl -b "${nvmedisk}" | tail -c +3072 | tr -d ' ') || true - if [[ -n "${linkname}" ]] && [[ "${linkname}" == "${AWS_STATE_DISK_DEVICENAME}" ]]; then - ln -s "${nvmedisk}" "${AWS_STATE_DISK_SYMLINK}" - fi - done - if [[ -L "${AWS_STATE_DISK_SYMLINK}" ]]; then - break +while [[ ! -L ${AWS_STATE_DISK_SYMLINK} ]]; do + for nvmedisk in /dev/nvme*n1; do + linkname=$(nvme amzn id-ctrl -b "${nvmedisk}" | tail -c +3072 | tr -d ' ') || true + if [[ -n ${linkname} ]] && [[ ${linkname} == "${AWS_STATE_DISK_DEVICENAME}" ]]; then + ln -s "${nvmedisk}" "${AWS_STATE_DISK_SYMLINK}" fi - echo "Waiting for state disk to appear.." - sleep 2 + done + if [[ -L ${AWS_STATE_DISK_SYMLINK} ]]; then + break + fi + echo "Waiting for state disk to appear.." 
+ sleep 2 done echo "AWS state disk found" diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh index b86db8191..9ea2f62fd 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh @@ -4,73 +4,76 @@ # SPDX-License-Identifier: AGPL-3.0-only depends() { - # expands to: systemd systemd-hostnamed systemd-networkd systemd-resolved systemd-timedated systemd-timesyncd - echo systemd-network-management + # expands to: systemd systemd-hostnamed systemd-networkd systemd-resolved systemd-timedated systemd-timesyncd + echo systemd-network-management } install_and_enable_unit() { - unit="$1"; shift - target="$1"; shift - inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" - mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" - ln_r "${systemdsystemunitdir}/${unit}" \ - "${systemdsystemconfdir}/${target}.wants/${unit}" + unit="$1" + shift + target="$1" + shift + inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" + mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" + ln_r "${systemdsystemunitdir}/${unit}" \ + "${systemdsystemconfdir}/${target}.wants/${unit}" } install_path() { - local dir="$1"; shift - mkdir -p "${initdir}/${dir}" + local dir="$1" + shift + mkdir -p "${initdir}/${dir}" } install() { - inst_multiple \ - bash - inst_script "/usr/sbin/disk-mapper" \ - "/usr/sbin/disk-mapper" + inst_multiple \ + bash + inst_script "/usr/sbin/disk-mapper" \ + "/usr/sbin/disk-mapper" - inst_script "${moddir}/prepare-state-disk.sh" \ - "/usr/sbin/prepare-state-disk" - install_and_enable_unit "prepare-state-disk.service" \ - "basic.target" - install_and_enable_unit "configure-constel-csp.service" \ - "basic.target" + inst_script "${moddir}/prepare-state-disk.sh" \ + 
"/usr/sbin/prepare-state-disk" + install_and_enable_unit "prepare-state-disk.service" \ + "basic.target" + install_and_enable_unit "configure-constel-csp.service" \ + "basic.target" - # aws nvme disks - inst_multiple \ - tail \ - tr + # aws nvme disks + inst_multiple \ + tail \ + tr - # azure scsi disks - inst_multiple \ - cut \ - readlink + # azure scsi disks + inst_multiple \ + cut \ + readlink - # gcp nvme disks - inst_multiple \ - date \ - xxd \ - grep \ - sed \ - ln \ - command \ - readlink + # gcp nvme disks + inst_multiple \ + date \ + xxd \ + grep \ + sed \ + ln \ + command \ + readlink - inst_script "/usr/sbin/nvme" \ - "/usr/sbin/nvme" - inst_script "/usr/lib/udev/google_nvme_id" \ - "/usr/lib/udev/google_nvme_id" - inst_rules "64-gce-disk-removal.rules" "65-gce-disk-naming.rules" + inst_script "/usr/sbin/nvme" \ + "/usr/sbin/nvme" + inst_script "/usr/lib/udev/google_nvme_id" \ + "/usr/lib/udev/google_nvme_id" + inst_rules "64-gce-disk-removal.rules" "65-gce-disk-naming.rules" - inst_script "${moddir}/aws-nvme-disk.sh" \ - "/usr/sbin/aws-nvme-disk" - install_and_enable_unit "aws-nvme-disk.service" \ - "basic.target" + inst_script "${moddir}/aws-nvme-disk.sh" \ + "/usr/sbin/aws-nvme-disk" + install_and_enable_unit "aws-nvme-disk.service" \ + "basic.target" - # TLS / CA store in initramfs - install_path /etc/pki/tls/certs/ - inst_simple /etc/pki/tls/certs/ca-bundle.crt \ - /etc/pki/tls/certs/ca-bundle.crt + # TLS / CA store in initramfs + install_path /etc/pki/tls/certs/ + inst_simple /etc/pki/tls/certs/ca-bundle.crt \ + /etc/pki/tls/certs/ca-bundle.crt - # backport of https://github.com/dracutdevs/dracut/commit/dcbe23c14d13ca335ad327b7bb985071ca442f12 - inst_simple "${moddir}/sysusers-dracut.conf" "${systemdsystemunitdir}/systemd-sysusers.service.d/sysusers-dracut.conf" + # backport of https://github.com/dracutdevs/dracut/commit/dcbe23c14d13ca335ad327b7bb985071ca442f12 + inst_simple "${moddir}/sysusers-dracut.conf" 
"${systemdsystemunitdir}/systemd-sysusers.service.d/sysusers-dracut.conf" } diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh index 424514802..86db9f8b0 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh @@ -10,7 +10,7 @@ shopt -s inherit_errexit # Store encryption key (random or recovered key) in /run/cryptsetup-keys.d/state.key disk-mapper -csp "${CONSTEL_CSP}" if [[ $? -ne 0 ]]; then - echo "Failed to prepare state disk" - sleep 2 # give the serial console time to print the error message - exit $? # exit with the same error code as disk-mapper + echo "Failed to prepare state disk" + sleep 2 # give the serial console time to print the error message + exit $? # exit with the same error code as disk-mapper fi diff --git a/image/mkosi.skeleton/usr/lib/udev/google_nvme_id b/image/mkosi.skeleton/usr/lib/udev/google_nvme_id index e8933e8c7..985fe7306 100755 --- a/image/mkosi.skeleton/usr/lib/udev/google_nvme_id +++ b/image/mkosi.skeleton/usr/lib/udev/google_nvme_id @@ -59,7 +59,7 @@ function get_namespace_device_name() { return 1 fi - if [[ -z "${nvme_json}" ]]; then + if [[ -z ${nvme_json} ]]; then err "NVMe Vendor Extension disk information not present" return 1 fi @@ -68,7 +68,7 @@ function get_namespace_device_name() { device_name="$(echo "${nvme_json}" | grep device_name | sed -e 's/.*"device_name":[ \t]*"\([a-zA-Z0-9_-]\+\)".*/\1/')" # Error if our device name is empty - if [[ -z "${device_name}" ]]; then + if [[ -z ${device_name} ]]; then err "Empty name" return 1 fi @@ -91,7 +91,7 @@ function get_namespace_device_name() { function get_namespace_number() { local dev_path="$1" local namespace_number - if [[ "${dev_path}" =~ ${NAMESPACE_NUMBER_REGEX} ]]; then + if [[ 
${dev_path} =~ ${NAMESPACE_NUMBER_REGEX} ]]; then namespace_number="${BASH_REMATCH[1]}" else return 1 @@ -114,7 +114,7 @@ function get_namespace_number() { function get_partition_number() { local dev_path="$1" local partition_number - if [[ "${dev_path}" =~ ${PARTITION_NUMBER_REGEX} ]]; then + if [[ ${dev_path} =~ ${PARTITION_NUMBER_REGEX} ]]; then partition_number="${BASH_REMATCH[1]}" echo "${partition_number}" else @@ -136,7 +136,7 @@ function gen_symlink() { local partition_number partition_number="$(get_partition_number "${dev_path}")" - if [[ -n "${partition_number}" ]]; then + if [[ -n ${partition_number} ]]; then ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}"-part"${partition_number}" > /dev/null 2>&1 else ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}" > /dev/null 2>&1 @@ -182,19 +182,21 @@ function main() { while getopts :d:sh flag; do case "${flag}" in - d) device_path="${OPTARG}";; - s) opt_gen_symlink='true';; - h) print_help_message - return 0 - ;; - :) echo "Invalid option: ${OPTARG} requires an argument" 1>&2 - return 1 - ;; - *) return 1 + d) device_path="${OPTARG}" ;; + s) opt_gen_symlink='true' ;; + h) + print_help_message + return 0 + ;; + :) + echo "Invalid option: ${OPTARG} requires an argument" 1>&2 + return 1 + ;; + *) return 1 ;; esac done - if [[ -z "${device_path}" ]]; then + if [[ -z ${device_path} ]]; then echo "Device path (-d) argument required. Use -h for full usage." 1>&2 exit 1 fi @@ -208,7 +210,7 @@ with sudo or install nvme-cli." fi # Ensure the passed device is actually an NVMe device - "${nvme_cli_bin}" id-ctrl "${device_path}" &>/dev/null + "${nvme_cli_bin}" id-ctrl "${device_path}" &> /dev/null if [[ $? -ne 0 ]]; then err "Passed device was not an NVMe device. (You may need to run this \ script as root/with sudo)." @@ -218,7 +220,7 @@ script as root/with sudo)." # Detect the type of attached nvme device local controller_id controller_id=$("${nvme_cli_bin}" id-ctrl "${device_path}") - if [[ ! 
"${controller_id}" =~ nvme_card-pd ]] ; then + if [[ ! ${controller_id} =~ nvme_card-pd ]]; then err "Device is not a PD-NVMe device" return 1 fi @@ -231,7 +233,7 @@ script as root/with sudo)." fi # Gen symlinks or print out the globals set by the identify command - if [[ "${opt_gen_symlink}" == 'true' ]]; then + if [[ ${opt_gen_symlink} == 'true' ]]; then gen_symlink "${device_path}" else # These will be consumed by udev diff --git a/image/mkosi.skeleton/usr/libexec/constellation-pcrs b/image/mkosi.skeleton/usr/libexec/constellation-pcrs index 9c08c9235..ad942d8ac 100755 --- a/image/mkosi.skeleton/usr/libexec/constellation-pcrs +++ b/image/mkosi.skeleton/usr/libexec/constellation-pcrs @@ -7,8 +7,8 @@ # and prints the message to the serial console main() { - pcr_state="$(tpm2_pcrread sha256)" - echo -e "PCR state:\n${pcr_state}\n" > /run/issue.d/35_constellation_pcrs.issue + pcr_state="$(tpm2_pcrread sha256)" + echo -e "PCR state:\n${pcr_state}\n" > /run/issue.d/35_constellation_pcrs.issue } main diff --git a/image/secure-boot/azure/delete.sh b/image/secure-boot/azure/delete.sh index f98843144..788acbdf3 100755 --- a/image/secure-boot/azure/delete.sh +++ b/image/secure-boot/azure/delete.sh @@ -3,27 +3,27 @@ set -euo pipefail shopt -s inherit_errexit -if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then - # shellcheck source=/dev/null - . "${CONFIG_FILE}" +if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then + # shellcheck source=/dev/null + . 
"${CONFIG_FILE}" fi POSITIONAL_ARGS=() while [[ $# -gt 0 ]]; do case $1 in - -n|--name) - AZURE_VM_NAME="$2" - shift # past argument - shift # past value - ;; - -*) - echo "Unknown option $1" - exit 1 - ;; - *) - POSITIONAL_ARGS+=("$1") # save positional arg - shift # past argument - ;; + -n | --name) + AZURE_VM_NAME="$2" + shift # past argument + shift # past value + ;; + -*) + echo "Unknown option $1" + exit 1 + ;; + *) + POSITIONAL_ARGS+=("$1") # save positional arg + shift # past argument + ;; esac done @@ -38,34 +38,33 @@ SUBNET=$(echo "${NIC_INFO}" | jq -r '.ipConfigurations[0].subnet.id') VNET=${SUBNET//\/subnets\/.*/} DISK=$(echo "${AZ_VM_INFO}" | jq -r '.storageProfile.osDisk.managedDisk.id') - -delete_vm () { - az vm delete -y --name "${AZURE_VM_NAME}" \ - --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true +delete_vm() { + az vm delete -y --name "${AZURE_VM_NAME}" \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true } -delete_vnet () { - az network vnet delete --ids "${VNET}" || true +delete_vnet() { + az network vnet delete --ids "${VNET}" || true } -delete_subnet () { - az network vnet subnet delete --ids "${SUBNET}" || true +delete_subnet() { + az network vnet subnet delete --ids "${SUBNET}" || true } -delete_nsg () { - az network nsg delete --ids "${NSG}" || true +delete_nsg() { + az network nsg delete --ids "${NSG}" || true } -delete_pubip () { - az network public-ip delete --ids "${PUBIP}" || true +delete_pubip() { + az network public-ip delete --ids "${PUBIP}" || true } -delete_disk () { - az disk delete -y --ids "${DISK}" || true +delete_disk() { + az disk delete -y --ids "${DISK}" || true } -delete_nic () { - az network nic delete --ids "${NIC}" || true +delete_nic() { + az network nic delete --ids "${NIC}" || true } delete_vm diff --git a/image/secure-boot/azure/extract_vmgs.sh b/image/secure-boot/azure/extract_vmgs.sh index cc1666f4c..3776efd70 100755 --- a/image/secure-boot/azure/extract_vmgs.sh +++ 
b/image/secure-boot/azure/extract_vmgs.sh @@ -3,28 +3,28 @@ set -euo pipefail shopt -s inherit_errexit -if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then - # shellcheck source=/dev/null - . "${CONFIG_FILE}" +if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then + # shellcheck source=/dev/null + . "${CONFIG_FILE}" fi AZURE_SUBSCRIPTION=$(az account show --query id -o tsv) POSITIONAL_ARGS=() while [[ $# -gt 0 ]]; do case $1 in - -n|--name) - AZURE_VM_NAME="$2" - shift # past argument - shift # past value - ;; - -*) - echo "Unknown option $1" - exit 1 - ;; - *) - POSITIONAL_ARGS+=("$1") # save positional arg - shift # past argument - ;; + -n | --name) + AZURE_VM_NAME="$2" + shift # past argument + shift # past value + ;; + -*) + echo "Unknown option $1" + exit 1 + ;; + *) + POSITIONAL_ARGS+=("$1") # save positional arg + shift # past argument + ;; esac done @@ -34,10 +34,10 @@ VM_DISK=$(az vm show -g "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_VM_NAME}" LOCATION=$(az disk show --ids "${VM_DISK}" --query "location" -o tsv) az snapshot create \ - -g "${AZURE_RESOURCE_GROUP_NAME}" \ - --source "${VM_DISK}" \ - --name "${AZURE_SNAPSHOT_NAME}" \ - -l "${LOCATION}" + -g "${AZURE_RESOURCE_GROUP_NAME}" \ + --source "${VM_DISK}" \ + --name "${AZURE_SNAPSHOT_NAME}" \ + -l "${LOCATION}" # Azure CLI does not implement getSecureVMGuestStateSAS for snapshots yet # az snapshot grant-access \ @@ -47,11 +47,11 @@ az snapshot create \ # -g "${AZURE_RESOURCE_GROUP_NAME}" BEGIN=$(az rest \ - --method post \ - --url "https://management.azure.com/subscriptions/${AZURE_SUBSCRIPTION}/resourceGroups/${AZURE_RESOURCE_GROUP_NAME}/providers/Microsoft.Compute/snapshots/${AZURE_SNAPSHOT_NAME}/beginGetAccess" \ - --uri-parameters api-version="2021-12-01" \ - --body '{"access": "Read", "durationInSeconds": 3600, "getSecureVMGuestStateSAS": true}' \ - --verbose 2>&1) + --method post \ + --url 
"https://management.azure.com/subscriptions/${AZURE_SUBSCRIPTION}/resourceGroups/${AZURE_RESOURCE_GROUP_NAME}/providers/Microsoft.Compute/snapshots/${AZURE_SNAPSHOT_NAME}/beginGetAccess" \ + --uri-parameters api-version="2021-12-01" \ + --body '{"access": "Read", "durationInSeconds": 3600, "getSecureVMGuestStateSAS": true}' \ + --verbose 2>&1) ASYNC_OPERATION_URI=$(echo "${BEGIN}" | grep Azure-AsyncOperation | cut -d ' ' -f 7 | tr -d "'") sleep 10 ACCESS=$(az rest --method get --url "${ASYNC_OPERATION_URI}") @@ -60,9 +60,9 @@ VMGS_URL=$(echo "${ACCESS}" | jq -r '.properties.output.securityDataAccessSAS') curl -L -o "${AZURE_VMGS_FILENAME}" "${VMGS_URL}" az snapshot revoke-access \ - --name "${AZURE_SNAPSHOT_NAME}" \ - -g "${AZURE_RESOURCE_GROUP_NAME}" + --name "${AZURE_SNAPSHOT_NAME}" \ + -g "${AZURE_RESOURCE_GROUP_NAME}" az snapshot delete \ - --name "${AZURE_SNAPSHOT_NAME}" \ - -g "${AZURE_RESOURCE_GROUP_NAME}" + --name "${AZURE_SNAPSHOT_NAME}" \ + -g "${AZURE_RESOURCE_GROUP_NAME}" echo "VMGS saved to ${AZURE_VMGS_FILENAME}" diff --git a/image/secure-boot/azure/launch.sh b/image/secure-boot/azure/launch.sh index be0e6a2ac..8c24385b6 100755 --- a/image/secure-boot/azure/launch.sh +++ b/image/secure-boot/azure/launch.sh @@ -3,101 +3,101 @@ set -euo pipefail shopt -s inherit_errexit -if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then - # shellcheck source=/dev/null - . "${CONFIG_FILE}" +if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then + # shellcheck source=/dev/null + . 
"${CONFIG_FILE}" fi POSITIONAL_ARGS=() while [[ $# -gt 0 ]]; do case $1 in - -n|--name) - AZURE_VM_NAME="$2" - shift # past argument - shift # past value - ;; - -g|--gallery) - CREATE_FROM_GALLERY=YES - shift # past argument - ;; - -d|--disk) - CREATE_FROM_GALLERY=NO - shift # past argument - ;; - --secure-boot) - AZURE_SECURE_BOOT="$2" - shift # past argument - shift # past value - ;; - --disk-name) - AZURE_DISK_NAME="$2" - shift # past argument - shift # past value - ;; - -*) - echo "Unknown option $1" - exit 1 - ;; - *) - POSITIONAL_ARGS+=("$1") # save positional arg - shift # past argument - ;; + -n | --name) + AZURE_VM_NAME="$2" + shift # past argument + shift # past value + ;; + -g | --gallery) + CREATE_FROM_GALLERY=YES + shift # past argument + ;; + -d | --disk) + CREATE_FROM_GALLERY=NO + shift # past argument + ;; + --secure-boot) + AZURE_SECURE_BOOT="$2" + shift # past argument + shift # past value + ;; + --disk-name) + AZURE_DISK_NAME="$2" + shift # past argument + shift # past value + ;; + -*) + echo "Unknown option $1" + exit 1 + ;; + *) + POSITIONAL_ARGS+=("$1") # save positional arg + shift # past argument + ;; esac done set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters -if [[ "${AZURE_SECURITY_TYPE}" == "ConfidentialVM" ]]; then +if [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then VMSIZE="Standard_DC2as_v5" -elif [[ "${AZURE_SECURITY_TYPE}" == "TrustedLaunch" ]]; then +elif [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then VMSIZE="standard_D2as_v5" else echo "Unknown security type: ${AZURE_SECURITY_TYPE}" exit 1 fi -create_vm_from_disk () { - AZURE_DISK_REFERENCE=$(az disk show --resource-group "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_DISK_NAME}" --query id -o tsv) - az vm create --name "${AZURE_VM_NAME}" \ - --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ - -l "${AZURE_REGION}" \ - --size "${VMSIZE}" \ - --public-ip-sku Standard \ - --os-type Linux \ - --attach-os-disk "${AZURE_DISK_REFERENCE}" \ - --security-type 
"${AZURE_SECURITY_TYPE}" \ - --os-disk-security-encryption-type VMGuestStateOnly \ - --enable-vtpm true \ - --enable-secure-boot "${AZURE_SECURE_BOOT}" \ - --boot-diagnostics-storage "" \ - --no-wait +create_vm_from_disk() { + AZURE_DISK_REFERENCE=$(az disk show --resource-group "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_DISK_NAME}" --query id -o tsv) + az vm create --name "${AZURE_VM_NAME}" \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + --size "${VMSIZE}" \ + --public-ip-sku Standard \ + --os-type Linux \ + --attach-os-disk "${AZURE_DISK_REFERENCE}" \ + --security-type "${AZURE_SECURITY_TYPE}" \ + --os-disk-security-encryption-type VMGuestStateOnly \ + --enable-vtpm true \ + --enable-secure-boot "${AZURE_SECURE_BOOT}" \ + --boot-diagnostics-storage "" \ + --no-wait } -create_vm_from_sig () { - AZURE_IMAGE_REFERENCE=$(az sig image-version show \ - --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ - --gallery-image-version "${AZURE_IMAGE_VERSION}" \ - --gallery-name "${AZURE_GALLERY_NAME}" \ - -g "${AZURE_RESOURCE_GROUP_NAME}" \ - --query id -o tsv) - az vm create --name "${AZURE_VM_NAME}" \ - --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ - -l "${AZURE_REGION}" \ - --size "${VMSIZE}" \ - --public-ip-sku Standard \ - --image "${AZURE_IMAGE_REFERENCE}" \ - --security-type "${AZURE_SECURITY_TYPE}" \ - --os-disk-security-encryption-type VMGuestStateOnly \ - --enable-vtpm true \ - --enable-secure-boot "${AZURE_SECURE_BOOT}" \ - --boot-diagnostics-storage "" \ - --no-wait +create_vm_from_sig() { + AZURE_IMAGE_REFERENCE=$(az sig image-version show \ + --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ + --gallery-image-version "${AZURE_IMAGE_VERSION}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + -g "${AZURE_RESOURCE_GROUP_NAME}" \ + --query id -o tsv) + az vm create --name "${AZURE_VM_NAME}" \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + --size "${VMSIZE}" \ + --public-ip-sku 
Standard \ + --image "${AZURE_IMAGE_REFERENCE}" \ + --security-type "${AZURE_SECURITY_TYPE}" \ + --os-disk-security-encryption-type VMGuestStateOnly \ + --enable-vtpm true \ + --enable-secure-boot "${AZURE_SECURE_BOOT}" \ + --boot-diagnostics-storage "" \ + --no-wait } -if [[ "${CREATE_FROM_GALLERY}" = "YES" ]]; then - create_vm_from_sig +if [[ ${CREATE_FROM_GALLERY} == "YES" ]]; then + create_vm_from_sig else - create_vm_from_disk + create_vm_from_disk fi sleep 30 diff --git a/image/secure-boot/generate_nvram_vars.sh b/image/secure-boot/generate_nvram_vars.sh index f1ccffc89..972d1ddbb 100755 --- a/image/secure-boot/generate_nvram_vars.sh +++ b/image/secure-boot/generate_nvram_vars.sh @@ -6,90 +6,90 @@ set -euo pipefail shopt -s inherit_errexit -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd) BASE_DIR=$(realpath "${SCRIPT_DIR}/..") # Set to qemu+tcp://localhost:16599/system for dockerized libvirt setup -if [[ -z "${LIBVIRT_SOCK}" ]]; then - LIBVIRT_SOCK=qemu:///system +if [[ -z ${LIBVIRT_SOCK} ]]; then + LIBVIRT_SOCK=qemu:///system fi -libvirt_nvram_gen () { - local image_path="${1}" - if test -f "${BASE_DIR}/image.nvram.template"; then - echo "NVRAM template already generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" - return - fi - if ! test -f "${image_path}"; then - echo "Image \"${image_path}\" does not exist yet. To generate nvram, create disk image first." - return - fi +libvirt_nvram_gen() { + local image_path="${1}" + if test -f "${BASE_DIR}/image.nvram.template"; then + echo "NVRAM template already generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" + return + fi + if ! test -f "${image_path}"; then + echo "Image \"${image_path}\" does not exist yet. To generate nvram, create disk image first." 
+ return + fi - OVMF_CODE=/usr/share/OVMF/OVMF_CODE_4M.ms.fd - OVMF_VARS=/usr/share/OVMF/OVMF_VARS_4M.ms.fd - if ! test -f "${OVMF_CODE}"; then - OVMF_CODE=/usr/share/OVMF/OVMF_CODE.secboot.fd - fi - if ! test -f "${OVMF_VARS}"; then - OVMF_VARS=/usr/share/OVMF/OVMF_VARS.secboot.fd - fi + OVMF_CODE=/usr/share/OVMF/OVMF_CODE_4M.ms.fd + OVMF_VARS=/usr/share/OVMF/OVMF_VARS_4M.ms.fd + if ! test -f "${OVMF_CODE}"; then + OVMF_CODE=/usr/share/OVMF/OVMF_CODE.secboot.fd + fi + if ! test -f "${OVMF_VARS}"; then + OVMF_VARS=/usr/share/OVMF/OVMF_VARS.secboot.fd + fi - echo "Using OVMF_CODE: ${OVMF_CODE}" - echo "Using OVMF_VARS: ${OVMF_VARS}" + echo "Using OVMF_CODE: ${OVMF_CODE}" + echo "Using OVMF_VARS: ${OVMF_VARS}" - # generate nvram file using libvirt - virt-install --name constell-nvram-gen \ - --connect "${LIBVIRT_SOCK}" \ - --nonetworks \ - --description 'Constellation' \ - --ram 1024 \ - --vcpus 1 \ - --osinfo detect=on,require=off \ - --disk "${image_path},format=raw" \ - --boot "machine=q35,menu=on,loader=${OVMF_CODE},loader.readonly=yes,loader.type=pflash,nvram.template=${OVMF_VARS},nvram=${BASE_DIR}/image.nvram,loader_secure=yes" \ - --features smm.state=on \ - --noautoconsole - echo -e 'connect using' - echo -e ' \u001b[1mvirsh console constell-nvram-gen\u001b[0m' - echo -e '' - echo -e 'Load db cert with MokManager or enroll full PKI with firmware setup' - echo -e '' - echo -e ' \u001b[1mMokManager\u001b[0m' - echo -e ' For mokmanager, try to boot as usual. You will see this message:' - echo -e ' > "Verification failed: (0x1A) Security Violation"' - echo -e ' Press OK, then ENTER, then "Enroll key from disk"' - echo -e ' Select the following key:' - echo -e ' > \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m' - echo -e ' Press Continue, then choose "Yes" to the question "Enroll the key(s)?"' - echo -e ' Choose reboot and continue this script.' - echo -e '' - echo -e ' \u001b[1mFirmware setup\u001b[0m' - echo -e ' For firmware setup, press F2.' 
- echo -e ' Go to "Device Manager">"Secure Boot Configuration">"Secure Boot Mode"' - echo -e ' Choose "Custom Mode"' - echo -e ' Go to "Custom Securee Boot Options"' - echo -e ' Go to "PK Options">"Enroll PK", Press "Y" if queried, "Enroll PK using File"' - echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/PK.cer\u001b[0m' - echo -e ' Choose "Commit Changes and Exit"' - echo -e ' Go to "KEK Options">"Enroll KEK", Press "Y" if queried, "Enroll KEK using File"' - echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/KEK.cer\u001b[0m' - echo -e ' Choose "Commit Changes and Exit"' - echo -e ' Go to "DB Options">"Enroll Signature">"Enroll Signature using File"' - echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m' - echo -e ' Choose "Commit Changes and Exit"' - echo -e ' Repeat the last step for the following certs:' - echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicWinProPCA2011_2011-10-19.crt\u001b[0m' - echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicCorUEFCA2011_2011-06-27.crt\u001b[0m' - echo -e ' Reboot and continue this script.' - echo -e '' - echo -e 'Press ENTER to continue after you followed one of the guides from above.' 
- read -r - sudo cp "${BASE_DIR}/image.nvram" "${BASE_DIR}/image.nvram.template" - virsh --connect "${LIBVIRT_SOCK}" destroy --domain constell-nvram-gen - virsh --connect "${LIBVIRT_SOCK}" undefine --nvram constell-nvram-gen - rm -f "${BASE_DIR}/image.nvram" + # generate nvram file using libvirt + virt-install --name constell-nvram-gen \ + --connect "${LIBVIRT_SOCK}" \ + --nonetworks \ + --description 'Constellation' \ + --ram 1024 \ + --vcpus 1 \ + --osinfo detect=on,require=off \ + --disk "${image_path},format=raw" \ + --boot "machine=q35,menu=on,loader=${OVMF_CODE},loader.readonly=yes,loader.type=pflash,nvram.template=${OVMF_VARS},nvram=${BASE_DIR}/image.nvram,loader_secure=yes" \ + --features smm.state=on \ + --noautoconsole + echo -e 'connect using' + echo -e ' \u001b[1mvirsh console constell-nvram-gen\u001b[0m' + echo -e '' + echo -e 'Load db cert with MokManager or enroll full PKI with firmware setup' + echo -e '' + echo -e ' \u001b[1mMokManager\u001b[0m' + echo -e ' For mokmanager, try to boot as usual. You will see this message:' + echo -e ' > "Verification failed: (0x1A) Security Violation"' + echo -e ' Press OK, then ENTER, then "Enroll key from disk"' + echo -e ' Select the following key:' + echo -e ' > \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m' + echo -e ' Press Continue, then choose "Yes" to the question "Enroll the key(s)?"' + echo -e ' Choose reboot and continue this script.' + echo -e '' + echo -e ' \u001b[1mFirmware setup\u001b[0m' + echo -e ' For firmware setup, press F2.' 
+ echo -e ' Go to "Device Manager">"Secure Boot Configuration">"Secure Boot Mode"' + echo -e ' Choose "Custom Mode"' + echo -e ' Go to "Custom Securee Boot Options"' + echo -e ' Go to "PK Options">"Enroll PK", Press "Y" if queried, "Enroll PK using File"' + echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/PK.cer\u001b[0m' + echo -e ' Choose "Commit Changes and Exit"' + echo -e ' Go to "KEK Options">"Enroll KEK", Press "Y" if queried, "Enroll KEK using File"' + echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/KEK.cer\u001b[0m' + echo -e ' Choose "Commit Changes and Exit"' + echo -e ' Go to "DB Options">"Enroll Signature">"Enroll Signature using File"' + echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m' + echo -e ' Choose "Commit Changes and Exit"' + echo -e ' Repeat the last step for the following certs:' + echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicWinProPCA2011_2011-10-19.crt\u001b[0m' + echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicCorUEFCA2011_2011-06-27.crt\u001b[0m' + echo -e ' Reboot and continue this script.' + echo -e '' + echo -e 'Press ENTER to continue after you followed one of the guides from above.' 
+ read -r + sudo cp "${BASE_DIR}/image.nvram" "${BASE_DIR}/image.nvram.template" + virsh --connect "${LIBVIRT_SOCK}" destroy --domain constell-nvram-gen + virsh --connect "${LIBVIRT_SOCK}" undefine --nvram constell-nvram-gen + rm -f "${BASE_DIR}/image.nvram" - echo "NVRAM template generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" + echo "NVRAM template generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" } libvirt_nvram_gen "$1" diff --git a/image/secure-boot/genkeys.sh b/image/secure-boot/genkeys.sh index 7fcc37fc0..af008db8e 100755 --- a/image/secure-boot/genkeys.sh +++ b/image/secure-boot/genkeys.sh @@ -12,58 +12,57 @@ set -euo pipefail shopt -s inherit_errexit -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd) TEMPLATES=${SCRIPT_DIR}/templates BASE_DIR=$(realpath "${SCRIPT_DIR}/..") -if [[ -z "${PKI}" ]]; then - PKI=${BASE_DIR}/pki +if [[ -z ${PKI} ]]; then + PKI=${BASE_DIR}/pki fi -if [[ -z "${PKI_SET}" ]]; then - PKI_SET=dev +if [[ -z ${PKI_SET} ]]; then + PKI_SET=dev fi -gen_pki () { - # Only use for non-production images. - # Use real PKI for production images instead. - count=$(find "${PKI}" -maxdepth 1 \( -name '*.key' -o -name '*.crt' -o -name '*.cer' -o -name '*.esl' -o -name '*.auth' \) 2>/dev/null | wc -l) - if [[ "${count}" != 0 ]] - then - echo PKI files "$(ls -1 "$(realpath "--relative-to=$(pwd)" "${PKI}")"/*.{key,crt,cer,esl,auth})" already exist - return - fi - mkdir -p "${PKI}" - pushd "${PKI}" || exit 1 +gen_pki() { + # Only use for non-production images. + # Use real PKI for production images instead. 
+ count=$(find "${PKI}" -maxdepth 1 \( -name '*.key' -o -name '*.crt' -o -name '*.cer' -o -name '*.esl' -o -name '*.auth' \) 2> /dev/null | wc -l) + if [[ ${count} != 0 ]]; then + echo PKI files "$(ls -1 "$(realpath "--relative-to=$(pwd)" "${PKI}")"/*.{key,crt,cer,esl,auth})" already exist + return + fi + mkdir -p "${PKI}" + pushd "${PKI}" || exit 1 - uuid=$(systemd-id128 new --uuid) - for key in PK KEK db; do - openssl req -new -x509 -config "${TEMPLATES}/${PKI_SET}_${key}.conf" -keyout "${key}.key" -out "${key}.crt" -nodes - openssl x509 -outform DER -in "${key}.crt" -out "${key}.cer" - cert-to-efi-sig-list -g "${uuid}" "${key}.crt" "${key}.esl" - done + uuid=$(systemd-id128 new --uuid) + for key in PK KEK db; do + openssl req -new -x509 -config "${TEMPLATES}/${PKI_SET}_${key}.conf" -keyout "${key}.key" -out "${key}.crt" -nodes + openssl x509 -outform DER -in "${key}.crt" -out "${key}.cer" + cert-to-efi-sig-list -g "${uuid}" "${key}.crt" "${key}.esl" + done - for key in MicWinProPCA2011_2011-10-19.crt MicCorUEFCA2011_2011-06-27.crt MicCorKEKCA2011_2011-06-24.crt; do - curl -sL "https://www.microsoft.com/pkiops/certs/${key}" --output "${key}" - sbsiglist --owner 77fa9abd-0359-4d32-bd60-28f4e78f784b --type x509 --output "${key%crt}esl" "${key}" - done + for key in MicWinProPCA2011_2011-10-19.crt MicCorUEFCA2011_2011-06-27.crt MicCorKEKCA2011_2011-06-24.crt; do + curl -sL "https://www.microsoft.com/pkiops/certs/${key}" --output "${key}" + sbsiglist --owner 77fa9abd-0359-4d32-bd60-28f4e78f784b --type x509 --output "${key%crt}esl" "${key}" + done - # Optionally add Microsoft Windows Production CA 2011 (needed to boot into Windows). - cat MicWinProPCA2011_2011-10-19.esl >> db.esl + # Optionally add Microsoft Windows Production CA 2011 (needed to boot into Windows). + cat MicWinProPCA2011_2011-10-19.esl >> db.esl - # Optionally add Microsoft Corporation UEFI CA 2011 (for firmware drivers / option ROMs - # and third-party boot loaders (including shim). 
This is highly recommended on real - # hardware as not including this may soft-brick your device (see next paragraph). - cat MicCorUEFCA2011_2011-06-27.esl >> db.esl + # Optionally add Microsoft Corporation UEFI CA 2011 (for firmware drivers / option ROMs + # and third-party boot loaders (including shim). This is highly recommended on real + # hardware as not including this may soft-brick your device (see next paragraph). + cat MicCorUEFCA2011_2011-06-27.esl >> db.esl - # Optionally add Microsoft Corporation KEK CA 2011. Recommended if either of the - # Microsoft keys is used as the official UEFI revocation database is signed with this - # key. The revocation database can be updated with [fwupdmgr(1)](https://www.freedesktop.org/software/systemd/man/fwupdmgr.html#). - cat MicCorKEKCA2011_2011-06-24.esl >> KEK.esl + # Optionally add Microsoft Corporation KEK CA 2011. Recommended if either of the + # Microsoft keys is used as the official UEFI revocation database is signed with this + # key. The revocation database can be updated with [fwupdmgr(1)](https://www.freedesktop.org/software/systemd/man/fwupdmgr.html#). + cat MicCorKEKCA2011_2011-06-24.esl >> KEK.esl - sign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth - sign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth - sign-efi-sig-list -c KEK.crt -k KEK.key db db.esl db.auth + sign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth + sign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth + sign-efi-sig-list -c KEK.crt -k KEK.key db db.esl db.auth - popd || exit 1 + popd || exit 1 } # gen_pki generates a PKI for testing purposes only. 
diff --git a/image/secure-boot/signed-shim.sh b/image/secure-boot/signed-shim.sh index 8cdd27276..09e4fda78 100755 --- a/image/secure-boot/signed-shim.sh +++ b/image/secure-boot/signed-shim.sh @@ -8,10 +8,9 @@ set -euo pipefail shopt -s inherit_errexit -if (( $# != 1 )) -then - echo "Usage: $0 " - exit 1 +if (($# != 1)); then + echo "Usage: $0 " + exit 1 fi # SOURCE is the URL used to download the signed shim RPM diff --git a/image/upload/pack.sh b/image/upload/pack.sh index 05a70dd38..945499cfd 100755 --- a/image/upload/pack.sh +++ b/image/upload/pack.sh @@ -8,55 +8,54 @@ shopt -s inherit_errexit # Show progress on pipes if `pv` is installed # Otherwise use plain cat -if ! command -v pv &> /dev/null -then - PV="cat" +if ! command -v pv &> /dev/null; then + PV="cat" else - PV="pv" + PV="pv" fi -pack () { - local cloudprovider=$1 - local unpacked_image=$2 - local packed_image=$3 - local unpacked_image_dir - unpacked_image_dir=$(mktemp -d) - local unpacked_image_filename - unpacked_image_filename=disk.raw - local tmp_tar_file - tmp_tar_file=$(mktemp -t verity.XXXXXX.tar) - cp "${unpacked_image}" "${unpacked_image_dir}/${unpacked_image_filename}" +pack() { + local cloudprovider=$1 + local unpacked_image=$2 + local packed_image=$3 + local unpacked_image_dir + unpacked_image_dir=$(mktemp -d) + local unpacked_image_filename + unpacked_image_filename=disk.raw + local tmp_tar_file + tmp_tar_file=$(mktemp -t verity.XXXXXX.tar) + cp "${unpacked_image}" "${unpacked_image_dir}/${unpacked_image_filename}" - case ${cloudprovider} in + case ${cloudprovider} in - gcp) - echo "📥 Packing GCP image..." - tar --owner=0 --group=0 -C "${unpacked_image_dir}" -Sch --format=oldgnu -f "${tmp_tar_file}" "${unpacked_image_filename}" - "${PV}" "${tmp_tar_file}" | pigz -9c > "${packed_image}" - rm "${tmp_tar_file}" - echo " Repacked image stored in ${packed_image}" - ;; + gcp) + echo "📥 Packing GCP image..." 
+ tar --owner=0 --group=0 -C "${unpacked_image_dir}" -Sch --format=oldgnu -f "${tmp_tar_file}" "${unpacked_image_filename}" + "${PV}" "${tmp_tar_file}" | pigz -9c > "${packed_image}" + rm "${tmp_tar_file}" + echo " Repacked image stored in ${packed_image}" + ;; - azure) - echo "📥 Packing Azure image..." - truncate -s %1MiB "${unpacked_image_dir}/${unpacked_image_filename}" - qemu-img convert -p -f raw -O vpc -o force_size,subformat=fixed "${unpacked_image_dir}/${unpacked_image_filename}" "${packed_image}" - echo " Repacked image stored in ${packed_image}" - ;; + azure) + echo "📥 Packing Azure image..." + truncate -s %1MiB "${unpacked_image_dir}/${unpacked_image_filename}" + qemu-img convert -p -f raw -O vpc -o force_size,subformat=fixed "${unpacked_image_dir}/${unpacked_image_filename}" "${packed_image}" + echo " Repacked image stored in ${packed_image}" + ;; - *) - echo "unknown cloud provider" - exit 1 - ;; - esac + *) + echo "unknown cloud provider" + exit 1 + ;; + esac - rm -r "${unpacked_image_dir}" + rm -r "${unpacked_image_dir}" } if [[ $# -ne 3 ]]; then - echo "Usage: $0 " - exit 1 + echo "Usage: $0 " + exit 1 fi pack "${1}" "${2}" "${3}" diff --git a/image/upload/upload_aws.sh b/image/upload/upload_aws.sh index 3d74a8e89..cf5b50631 100755 --- a/image/upload/upload_aws.sh +++ b/image/upload/upload_aws.sh @@ -6,9 +6,9 @@ set -euo pipefail shopt -s inherit_errexit -if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then - # shellcheck source=/dev/null - . "${CONFIG_FILE}" +if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then + # shellcheck source=/dev/null + . 
"${CONFIG_FILE}" fi CONTAINERS_JSON=$(mktemp /tmp/containers-XXXXXXXXXXXXXX.json) @@ -16,91 +16,91 @@ declare -A AMI_FOR_REGION AMI_OUTPUT=$1 import_status() { - local import_task_id=$1 - aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${import_task_id}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status' + local import_task_id=$1 + aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${import_task_id}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status' } wait_for_import() { - local import_task_id=$1 + local import_task_id=$1 + local status + echo -n "Waiting for import to finish" + while true; do local status - echo -n "Waiting for import to finish" - while true; do - local status - status=$(import_status "${import_task_id}") - case "${status}" in - completed) - echo -e "\nImport completed." - break - ;; - active) - echo -n "." - sleep 5 - ;; - *) - echo "Unexpected status: ${status}" - exit 1 - ;; - esac - done + status=$(import_status "${import_task_id}") + case "${status}" in + completed) + echo -e "\nImport completed." + break + ;; + active) + echo -n "." + sleep 5 + ;; + *) + echo "Unexpected status: ${status}" + exit 1 + ;; + esac + done } wait_for_image_available() { - local ami_id=$1 - local region=$2 - echo -n "Waiting for image ${ami_id} to be available" - while true; do - # Waiter ImageAvailable failed: Max attempts exceeded - local status - status=$(aws ec2 wait image-available \ - --region "${region}" \ - --image-ids "${ami_id}" 2>&1 || true) - case "${status}" in - "") - echo -e "\nImage available." - break - ;; - *"Max attempts exceeded"*) - echo -n "." 
- ;; - *) - echo "Unexpected status: ${status}" - exit 1 - ;; - esac - done + local ami_id=$1 + local region=$2 + echo -n "Waiting for image ${ami_id} to be available" + while true; do + # Waiter ImageAvailable failed: Max attempts exceeded + local status + status=$(aws ec2 wait image-available \ + --region "${region}" \ + --image-ids "${ami_id}" 2>&1 || true) + case "${status}" in + "") + echo -e "\nImage available." + break + ;; + *"Max attempts exceeded"*) + echo -n "." + ;; + *) + echo "Unexpected status: ${status}" + exit 1 + ;; + esac + done } tag_ami_with_backing_snapshot() { - local ami_id=$1 - local region=$2 - wait_for_image_available "${ami_id}" "${region}" - local snapshot_id - snapshot_id=$(aws ec2 describe-images \ - --region "${region}" \ - --image-ids "${ami_id}" \ - --output text --query "Images[0].BlockDeviceMappings[0].Ebs.SnapshotId") - aws ec2 create-tags \ - --region "${region}" \ - --resources "${ami_id}" "${snapshot_id}" \ - --tags "Key=Name,Value=${AWS_IMAGE_NAME}" + local ami_id=$1 + local region=$2 + wait_for_image_available "${ami_id}" "${region}" + local snapshot_id + snapshot_id=$(aws ec2 describe-images \ + --region "${region}" \ + --image-ids "${ami_id}" \ + --output text --query "Images[0].BlockDeviceMappings[0].Ebs.SnapshotId") + aws ec2 create-tags \ + --region "${region}" \ + --resources "${ami_id}" "${snapshot_id}" \ + --tags "Key=Name,Value=${AWS_IMAGE_NAME}" } make_ami_public() { - local ami_id=$1 - local region=$2 - if [ "${AWS_PUBLISH-}" != "true" ]; then - return - fi - aws ec2 modify-image-attribute \ - --region "${region}" \ - --image-id "${ami_id}" \ - --launch-permission "Add=[{Group=all}]" + local ami_id=$1 + local region=$2 + if [[ ${AWS_PUBLISH-} != "true" ]]; then + return + fi + aws ec2 modify-image-attribute \ + --region "${region}" \ + --image-id "${ami_id}" \ + --launch-permission "Add=[{Group=all}]" } create_ami_from_raw_disk() { - echo "Uploading raw disk image to S3" - aws s3 cp "${AWS_IMAGE_PATH}" 
"s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}" --no-progress - printf '{ + echo "Uploading raw disk image to S3" + aws s3 cp "${AWS_IMAGE_PATH}" "s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}" --no-progress + printf '{ "Description": "%s", "Format": "raw", "UserBucket": { @@ -108,61 +108,60 @@ create_ami_from_raw_disk() { "S3Key": "%s" } }' "${AWS_IMAGE_NAME}" "${AWS_BUCKET}" "${AWS_IMAGE_FILENAME}" > "${CONTAINERS_JSON}" - IMPORT_SNAPSHOT=$(aws ec2 import-snapshot --region "${AWS_REGION}" --disk-container "file://${CONTAINERS_JSON}") - echo "${IMPORT_SNAPSHOT}" - IMPORT_TASK_ID=$(echo "${IMPORT_SNAPSHOT}" | jq -r '.ImportTaskId') - aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" - wait_for_import "${IMPORT_TASK_ID}" - AWS_SNAPSHOT=$(aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId') - echo "Deleting raw disk image from S3" - aws s3 rm "s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}" - rm "${CONTAINERS_JSON}" - REGISTER_OUT=$(aws ec2 register-image \ - --region "${AWS_REGION}" \ - --name "${AWS_IMAGE_NAME}" \ - --boot-mode uefi \ - --architecture x86_64 \ - --root-device-name /dev/xvda \ - --block-device-mappings "DeviceName=/dev/xvda,Ebs={SnapshotId=${AWS_SNAPSHOT}}" \ - --ena-support \ - --tpm-support v2.0 \ - --uefi-data "$(cat "${AWS_EFIVARS_PATH}")" \ - ) - IMAGE_ID=$(echo "${REGISTER_OUT}" | jq -r '.ImageId') - AMI_FOR_REGION=( ["${AWS_REGION}"]="${IMAGE_ID}") - tag_ami_with_backing_snapshot "${IMAGE_ID}" "${AWS_REGION}" - make_ami_public "${IMAGE_ID}" "${AWS_REGION}" - echo "Imported initial AMI as ${IMAGE_ID} in ${AWS_REGION}" + IMPORT_SNAPSHOT=$(aws ec2 import-snapshot --region "${AWS_REGION}" --disk-container "file://${CONTAINERS_JSON}") + echo "${IMPORT_SNAPSHOT}" + IMPORT_TASK_ID=$(echo "${IMPORT_SNAPSHOT}" | jq -r '.ImportTaskId') + aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" 
--import-task-ids "${IMPORT_TASK_ID}" + wait_for_import "${IMPORT_TASK_ID}" + AWS_SNAPSHOT=$(aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId') + echo "Deleting raw disk image from S3" + aws s3 rm "s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}" + rm "${CONTAINERS_JSON}" + REGISTER_OUT=$( + aws ec2 register-image \ + --region "${AWS_REGION}" \ + --name "${AWS_IMAGE_NAME}" \ + --boot-mode uefi \ + --architecture x86_64 \ + --root-device-name /dev/xvda \ + --block-device-mappings "DeviceName=/dev/xvda,Ebs={SnapshotId=${AWS_SNAPSHOT}}" \ + --ena-support \ + --tpm-support v2.0 \ + --uefi-data "$(cat "${AWS_EFIVARS_PATH}")" + ) + IMAGE_ID=$(echo "${REGISTER_OUT}" | jq -r '.ImageId') + AMI_FOR_REGION=(["${AWS_REGION}"]="${IMAGE_ID}") + tag_ami_with_backing_snapshot "${IMAGE_ID}" "${AWS_REGION}" + make_ami_public "${IMAGE_ID}" "${AWS_REGION}" + echo "Imported initial AMI as ${IMAGE_ID} in ${AWS_REGION}" } replicate_ami() { - local target_region=$1 - local replicated_image_out - replicated_image_out=$(aws ec2 copy-image \ - --name "${AWS_IMAGE_NAME}" \ - --source-region "${AWS_REGION}" \ - --source-image-id "${IMAGE_ID}" \ - --region "${target_region}") - local replicated_image_id - replicated_image_id=$(echo "${replicated_image_out}" | jq -r '.ImageId') - AMI_FOR_REGION["${target_region}"]=${replicated_image_id} - echo "Replicated AMI as ${replicated_image_id} in ${target_region}" + local target_region=$1 + local replicated_image_out + replicated_image_out=$(aws ec2 copy-image \ + --name "${AWS_IMAGE_NAME}" \ + --source-region "${AWS_REGION}" \ + --source-image-id "${IMAGE_ID}" \ + --region "${target_region}") + local replicated_image_id + replicated_image_id=$(echo "${replicated_image_out}" | jq -r '.ImageId') + AMI_FOR_REGION["${target_region}"]=${replicated_image_id} + echo "Replicated AMI as ${replicated_image_id} in ${target_region}" } - - 
create_ami_from_raw_disk

# replicate in parallel
for region in ${AWS_REPLICATION_REGIONS}; do
  replicate_ami "${region}"
done

# wait for all images to be available and tag + publish them
for region in ${AWS_REPLICATION_REGIONS}; do
  tag_ami_with_backing_snapshot "${AMI_FOR_REGION[${region}]}" "${region}"
  make_ami_public "${AMI_FOR_REGION[${region}]}" "${region}"
done

# Emit a JSON map {region: ami-id, ...} for consumption by the pipeline.
echo -n "{\"${AWS_REGION}\": \"${AMI_FOR_REGION[${AWS_REGION}]}\"" > "${AMI_OUTPUT}"
for region in ${AWS_REPLICATION_REGIONS}; do
  echo -n ", \"${region}\": \"${AMI_FOR_REGION[${region}]}\"" >> "${AMI_OUTPUT}"
done
echo "}" >> "${AMI_OUTPUT}"

# ---------------------------------------------------------------------------
# image/upload/upload_azure.sh (shebang/header lines fall outside this chunk)
# ---------------------------------------------------------------------------
set -euo pipefail
shopt -s inherit_errexit

# FIX(review): this test used `-z`, which can never source the file:
# `-z` is true only when CONFIG_FILE is unset/empty, and then `-f ""` is
# always false. `-n` makes the optional config sourcing actually work.
if [[ -n ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi
CREATE_SIG_VERSION=NO
POSITIONAL_ARGS=()

# Parse flags; anything unrecognized that is not an option is kept as a
# positional argument and restored below.
while [[ $# -gt 0 ]]; do
  case $1 in
  -g | --gallery)
    CREATE_SIG_VERSION=YES
    shift # past argument
    ;;
  --disk-name)
    AZURE_DISK_NAME="$2"
    shift # past argument
    shift # past value
    ;;
  -*)
    echo "Unknown option $1"
    exit 1
    ;;
  *)
    POSITIONAL_ARGS+=("$1") # save positional arg
    shift # past argument
    ;;
  esac
done

set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters

# Map the requested security type onto the disk security type and, for
# full ConfidentialVM, the gallery-version encryption type.
if [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then
  AZURE_DISK_SECURITY_TYPE=ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey
  AZURE_SIG_VERSION_ENCRYPTION_TYPE=EncryptedVMGuestStateOnlyWithPmk
elif [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVMSupported" ]]; then
  AZURE_DISK_SECURITY_TYPE=""
elif [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then
  AZURE_DISK_SECURITY_TYPE=TrustedLaunch
else
  echo "Unknown security type: ${AZURE_SECURITY_TYPE}"
  exit 1
fi

AZURE_CVM_ENCRYPTION_ARGS=""
# One ",<encryption-type>" entry is appended per replication region, to
# pair positionally with --target-regions below.
if [[ -n ${AZURE_SIG_VERSION_ENCRYPTION_TYPE-} ]]; then
  AZURE_CVM_ENCRYPTION_ARGS=" --target-region-cvm-encryption "
  for _ in ${AZURE_REPLICATION_REGIONS}; do
    AZURE_CVM_ENCRYPTION_ARGS=" ${AZURE_CVM_ENCRYPTION_ARGS} ${AZURE_SIG_VERSION_ENCRYPTION_TYPE}, "
  done
fi
echo "Replicating image in ${AZURE_REPLICATION_REGIONS}"

AZURE_VMGS_PATH=$1
if [[ -z ${AZURE_VMGS_PATH} ]] && [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then
  echo "No VMGS path provided - using default ConfidentialVM VMGS"
  AZURE_VMGS_PATH="${BLOBS_DIR}/cvm-vmgs.vhd"
elif [[ -z ${AZURE_VMGS_PATH} ]] && [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then
  # FIX(review): typo in the log message — "TrsutedLaunch" -> "TrustedLaunch".
  echo "No VMGS path provided - using default TrustedLaunch VMGS"
  AZURE_VMGS_PATH="${BLOBS_DIR}/trusted-launch-vmgs.vhd"
fi

SIZE=$(wc -c "${AZURE_IMAGE_PATH}" | cut -d " " -f1)

# Create a managed disk with guest-state (VMGS) security data and upload
# both the OS image and the VMGS blob into it via SAS URLs.
create_disk_with_vmgs() {
  az disk create \
    -n "${AZURE_DISK_NAME}" \
    -g "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --upload-size-bytes "${SIZE}" \
    --sku standard_lrs \
    --upload-type UploadWithSecurityData \
    --security-type "${AZURE_DISK_SECURITY_TYPE}"
  az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
  az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']"
  # The ${VAR+word} expansion is intentionally unquoted so the flag
  # vanishes entirely when AZURE_VMGS_PATH is unset.
  DISK_SAS=$(az disk grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \
    --access-level Write --duration-in-seconds 86400 \
    ${AZURE_VMGS_PATH+"--secure-vm-guest-state-sas"})
  azcopy copy "${AZURE_IMAGE_PATH}" \
    "$(echo "${DISK_SAS}" | jq -r .accessSas)" \
    --blob-type PageBlob
  if [[ -z ${AZURE_VMGS_PATH} ]]; then
    echo "No VMGS path provided - skipping VMGS upload"
  else
    azcopy copy "${AZURE_VMGS_PATH}" \
      "$(echo "${DISK_SAS}" | jq -r .securityDataAccessSas)" \
      --blob-type PageBlob
  fi
  az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}
# Create a plain managed disk (no security data) and upload the OS image.
create_disk_without_vmgs() {
  az disk create \
    -n "${AZURE_DISK_NAME}" \
    -g "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --upload-size-bytes "${SIZE}" \
    --sku standard_lrs \
    --upload-type Upload
  az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
  az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']"
  DISK_SAS=$(az disk grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \
    --access-level Write --duration-in-seconds 86400)
  azcopy copy "${AZURE_IMAGE_PATH}" \
    "$(echo "${DISK_SAS}" | jq -r .accessSas)" \
    --blob-type PageBlob
  az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

# Dispatch to the VMGS / non-VMGS upload path based on AZURE_VMGS_PATH.
create_disk() {
  if [[ -z ${AZURE_VMGS_PATH} ]]; then
    create_disk_without_vmgs
  else
    create_disk_with_vmgs
  fi
}

# Remove the staging disk once the gallery version has been created.
delete_disk() {
  az disk delete -y -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

# Create a managed image from the disk. Only used on the non-VMGS path;
# the VMGS path snapshots the disk directly in create_sig_version.
create_image() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    return
  fi
  az image create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    -n "${AZURE_DISK_NAME}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --source "$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
}

# Counterpart to create_image; a no-op on the VMGS path.
delete_image() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    return
  fi
  az image delete -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}
# Idempotently create the Shared Image Gallery and image definition, then
# publish a new image version replicated to AZURE_REPLICATION_REGIONS.
# Source is the disk snapshot (VMGS path) or the managed image (non-VMGS).
create_sig_version() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    local DISK
    DISK="$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
    local SOURCE="--os-snapshot ${DISK}"
  else
    local IMAGE
    IMAGE="$(az image list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
    local SOURCE="--managed-image ${IMAGE}"
  fi
  # "|| true": gallery/definition may already exist from a previous run.
  az sig create -l "${AZURE_REGION}" --gallery-name "${AZURE_GALLERY_NAME}" --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true
  az sig image-definition create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --gallery-name "${AZURE_GALLERY_NAME}" \
    --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
    --publisher "${AZURE_PUBLISHER}" \
    --offer "${AZURE_IMAGE_OFFER}" \
    --sku "${AZURE_SKU}" \
    --os-type Linux \
    --os-state generalized \
    --hyper-v-generation V2 \
    --features SecurityType="${AZURE_SECURITY_TYPE}" || true
  # NOTE(review): "${AZURE_CVM_ENCRYPTION_ARGS}" and "${SOURCE}" hold
  # multi-word option strings but are quoted, so each reaches az as a
  # single argument (e.g. "--managed-image <id>") — this looks like it
  # cannot parse; confirm against a real run and consider arrays.
  az sig image-version create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --gallery-name "${AZURE_GALLERY_NAME}" \
    --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
    --gallery-image-version "${AZURE_IMAGE_VERSION}" \
    --target-regions "${AZURE_REPLICATION_REGIONS}" \
    "${AZURE_CVM_ENCRYPTION_ARGS}" \
    --replica-count 1 \
    --replication-mode Full \
    "${SOURCE}"
}

create_disk
if [[ ${CREATE_SIG_VERSION} == "YES" ]]; then
  create_image
  create_sig_version
  delete_image
  delete_disk
fi

# ---------------------------------------------------------------------------
# image/upload/upload_gcp.sh (shebang/header lines fall outside this chunk)
# ---------------------------------------------------------------------------
set -euo pipefail
shopt -s inherit_errexit

# FIX(review): same bug as upload_azure.sh — `-z` makes this condition
# unsatisfiable, so the optional config was never sourced. Use `-n`.
if [[ -n ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi

PK_FILE=${PKI}/PK.cer
# NOTE(review): KEK_FILES and DB_FILES are defined in lines that fall into
# an unseen diff-hunk gap here — verify against the original file.

gsutil mb -l "${GCP_REGION}" "gs://${GCP_BUCKET}" || true
gsutil pap set enforced "gs://${GCP_BUCKET}" || true
gsutil cp "${GCP_IMAGE_PATH}" "gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}"
gcloud compute images create "${GCP_IMAGE_NAME}" \
  "--family=${GCP_IMAGE_FAMILY}" \
  "--source-uri=gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}" \
  "--guest-os-features=GVNIC,SEV_CAPABLE,VIRTIO_SCSI_MULTIQUEUE,UEFI_COMPATIBLE" \
  "--platform-key-file=${PK_FILE}" \
  "--key-exchange-key-file=${KEK_FILES}" \
  "--signature-database-file=${DB_FILES}" \
  "--project=${GCP_PROJECT}"
gcloud compute images add-iam-policy-binding "${GCP_IMAGE_NAME}" \
  "--project=${GCP_PROJECT}" \
  --member='allAuthenticatedUsers' \
  --role='roles/compute.imageUser'
gsutil rm "gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}"