diff --git a/.github/actions/constellation_create/aws-logs.sh b/.github/actions/constellation_create/aws-logs.sh index e3db8a7e4..298e91e9d 100755 --- a/.github/actions/constellation_create/aws-logs.sh +++ b/.github/actions/constellation_create/aws-logs.sh @@ -34,18 +34,18 @@ workerInstances=$(\ echo "Fetching logs from control planes: ${controlInstances}" -for instance in $controlInstances; do - printf "Fetching for %s\n" ${instance} - aws ec2 get-console-output --region ${1} --instance-id ${instance} | \ +for instance in ${controlInstances}; do + printf "Fetching for %s\n" "${instance}" + aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | \ jq -r .'Output' | \ - tail -n +2 > control-plane-${instance}.log + tail -n +2 > control-plane-"${instance}".log done echo "Fetching logs from worker nodes: ${workerInstances}" -for instance in $workerInstances; do - printf "Fetching for %s\n" ${instance} - aws ec2 get-console-output --region ${1} --instance-id ${instance} | \ +for instance in ${workerInstances}; do + printf "Fetching for %s\n" "${instance}" + aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | \ jq -r .'Output' | \ - tail -n +2 > worker-${instance}.log + tail -n +2 > worker-"${instance}".log done diff --git a/.github/actions/constellation_create/az-logs.sh b/.github/actions/constellation_create/az-logs.sh index 68bd1dcca..27a6bf35f 100755 --- a/.github/actions/constellation_create/az-logs.sh +++ b/.github/actions/constellation_create/az-logs.sh @@ -1,21 +1,24 @@ #!/usr/bin/env bash -printf "Fetching logs of instances in resource group %s\n" $1 +set -euo pipefail +shopt -s inherit_errexit + +printf "Fetching logs of instances in resource group %s\n" "${1}" # get list of all scale sets -scalesetsjson=$(az vmss list --resource-group $1 -o json) -scalesetslist=$(echo $scalesetsjson | jq -r '.[] | .name') +scalesetsjson=$(az vmss list --resource-group "${1}" -o json) +scalesetslist=$(echo "${scalesetsjson}" | jq -r '.[] | 
.name') subscription=$(az account show | jq -r .id) -printf "Checking scalesets %s\n" $scalesetslist +printf "Checking scalesets %s\n" "${scalesetslist}" -for scaleset in $scalesetslist; do - instanceids=$(az vmss list-instances --resource-group $1 --name ${scaleset} -o json | jq -r '.[] | .instanceId') - printf "Checking instance IDs %s\n" $instanceids - for instanceid in $instanceids; do - bloburi=$(az rest --method post --url https://management.azure.com/subscriptions/${subscription}/resourceGroups/${1}/providers/Microsoft.Compute/virtualMachineScaleSets/${scaleset}/virtualmachines/$instanceid/retrieveBootDiagnosticsData?api-version=2022-03-01 | jq '.serialConsoleLogBlobUri' -r) +for scaleset in ${scalesetslist}; do + instanceids=$(az vmss list-instances --resource-group "${1}" --name "${scaleset}" -o json | jq -r '.[] | .instanceId') + printf "Checking instance IDs %s\n" "${instanceids}" + for instanceid in ${instanceids}; do + bloburi=$(az rest --method post --url https://management.azure.com/subscriptions/"${subscription}"/resourceGroups/"${1}"/providers/Microsoft.Compute/virtualMachineScaleSets/"${scaleset}"/virtualmachines/"${instanceid}"/retrieveBootDiagnosticsData?api-version=2022-03-01 | jq '.serialConsoleLogBlobUri' -r) sleep 4 - curl -sL -o "./${scaleset}-${instanceid}.log" $bloburi + curl -sL -o "./${scaleset}-${instanceid}.log" "${bloburi}" realpath "./${scaleset}-${instanceid}.log" done done diff --git a/.github/actions/constellation_create/gcp-logs.sh b/.github/actions/constellation_create/gcp-logs.sh index 149c20147..951ea580f 100755 --- a/.github/actions/constellation_create/gcp-logs.sh +++ b/.github/actions/constellation_create/gcp-logs.sh @@ -1,24 +1,27 @@ #!/usr/bin/env bash -CONTROL_INSTANCE_GROUP=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name' ) -WORKER_INSTANCE_GROUP=$(terraform show -json | jq -r 
.'values.root_module.child_modules[] | select(.address == "module.instance_group_worker") | .resources[0].values.base_instance_name') -ZONE=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.zone' ) +set -euo pipefail +shopt -s inherit_errexit -CONTROL_INSTANCE_GROUP_SHORT=${CONTROL_INSTANCE_GROUP##*/} -WORKER_INSTANCE_GROUP_SHORT=${WORKER_INSTANCE_GROUP##*/} +controlInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name' ) +workerInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_worker") | .resources[0].values.base_instance_name') +zone=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.zone' ) -CONTROL_INSTANCES=$(gcloud compute instance-groups managed list-instances ${CONTROL_INSTANCE_GROUP_SHORT} --zone ${ZONE} --format=json | jq -r '.[] | .instance') -WORKER_INSTANCES=$(gcloud compute instance-groups managed list-instances ${WORKER_INSTANCE_GROUP_SHORT} --zone ${ZONE} --format=json | jq -r '.[] | .instance') +controlInstanceGroup=${controlInstanceGroup##*/} +workerInstanceGroupShort=${workerInstanceGroup##*/} -ALL_INSTANCES="$CONTROL_INSTANCES $WORKER_INSTANCES" +controlInstances=$(gcloud compute instance-groups managed list-instances "${controlInstanceGroup}" --zone "${zone}" --format=json | jq -r '.[] | .instance') +workerInstances=$(gcloud compute instance-groups managed list-instances "${workerInstanceGroupShort}" --zone "${zone}" --format=json | jq -r '.[] | .instance') -printf "Fetching logs for %s and %s\n" ${CONTROL_INSTANCES} ${WORKER_INSTANCES} +ALL_INSTANCES="${controlInstances} ${workerInstances}" -for INSTANCE in $ALL_INSTANCES; do +printf "Fetching 
logs for %s and %s\n" "${controlInstances}" "${workerInstances}" + +for INSTANCE in ${ALL_INSTANCES}; do SHORT_NAME=${INSTANCE##*/} - printf "Fetching for %s\n" ${SHORT_NAME} - gcloud compute instances get-serial-port-output ${INSTANCE} \ + printf "Fetching for %s\n" "${SHORT_NAME}" + gcloud compute instances get-serial-port-output "${INSTANCE}" \ --port 1 \ --start 0 \ - --zone ${ZONE} > ${SHORT_NAME}.log + --zone "${zone}" > "${SHORT_NAME}".log done diff --git a/.shellcheckrc b/.shellcheckrc new file mode 100644 index 000000000..7e6cb9d42 --- /dev/null +++ b/.shellcheckrc @@ -0,0 +1,13 @@ +external-sources=true +source-path=SCRIPTDIR + +disable=SC2181 + +# Enable optionals, see https://github.com/koalaman/shellcheck/wiki/optional. +enable=add-default-case +enable=avoid-nullary-conditions +enable=check-set-e-suppressed +enable=deprecate-which +enable=quote-safe-variables +enable=require-double-brackets +enable=require-variable-braces diff --git a/cli/internal/helm/generateCilium.sh b/cli/internal/helm/generateCilium.sh index feec3040b..6bf85a722 100755 --- a/cli/internal/helm/generateCilium.sh +++ b/cli/internal/helm/generateCilium.sh @@ -1,10 +1,13 @@ #!/usr/bin/env bash -CALLDIR=$(pwd) -CILIUMTMPDIR=$(mktemp -d) -cd $CILIUMTMPDIR +set -euo pipefail +shopt -s inherit_errexit + +calldir=$(pwd) +ciliumTmpDir=$(mktemp -d) +cd "${ciliumTmpDir}" || exit 1 git clone --depth 1 -b 1.12.1 https://github.com/cilium/cilium.git -cd cilium -git apply $CALLDIR/cilium.patch -cp -r install/kubernetes/cilium $CALLDIR/charts -rm -r $CILIUMTMPDIR +cd cilium || exit 1 +git apply "${calldir}"/cilium.patch +cp -r install/kubernetes/cilium "${calldir}"/charts +rm -r "${ciliumTmpDir}" diff --git a/cli/internal/libvirt/start.sh b/cli/internal/libvirt/start.sh index 84cc82ed1..b3a5ddcdf 100755 --- a/cli/internal/libvirt/start.sh +++ b/cli/internal/libvirt/start.sh @@ -1,8 +1,11 @@ #!/usr/bin/env bash +set -euo pipefail +shopt -s inherit_errexit + # Assign qemu the GID of the host system's 
'kvm' group to avoid permission issues for environments defaulting to 660 for /dev/kvm (e.g. Debian-based distros) KVM_HOST_GID="$(stat -c '%g' /dev/kvm)" -groupadd -o -g "$KVM_HOST_GID" host-kvm +groupadd -o -g "${KVM_HOST_GID}" host-kvm usermod -a -G host-kvm qemu # Start libvirt daemon diff --git a/hack/azure-jump-host/jump-host-create b/hack/azure-jump-host/jump-host-create index 8f33bb05b..5c3e27d22 100755 --- a/hack/azure-jump-host/jump-host-create +++ b/hack/azure-jump-host/jump-host-create @@ -1,8 +1,9 @@ #!/usr/bin/env bash -set -e -set -o pipefail -SCRIPTDIR="$( dirname -- $(realpath "${BASH_SOURCE}"); )"; +set -euo pipefail +shopt -s inherit_errexit + +SCRIPTDIR="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")"; )"; RG=$(jq -r .azureresourcegroup constellation-state.json) SUBNET=$(jq -r .azuresubnet constellation-state.json) VNET=${SUBNET%"/subnets/nodeNetwork"} diff --git a/hack/check-licenses.sh b/hack/check-licenses.sh index 6ae79e75e..1fb949d6f 100755 --- a/hack/check-licenses.sh +++ b/hack/check-licenses.sh @@ -2,27 +2,28 @@ # Compare licenses of Go dependencies against a whitelist. -set -e -o pipefail +set -euo pipefail +shopt -s inherit_errexit not_allowed() { - echo "license not allowed for package: $line" + echo "license not allowed for package: ${line}" err=1 } go mod download go-licenses csv ./... 
| { -while read line; do +while read -r line; do pkg=${line%%,*} lic=${line##*,} - case $lic in + case ${lic} in Apache-2.0|BSD-2-Clause|BSD-3-Clause|ISC|MIT) ;; MPL-2.0) - case $pkg in + case ${pkg} in github.com/talos-systems/talos/pkg/machinery/config/encoder) ;; github.com/letsencrypt/boulder) @@ -36,7 +37,7 @@ while read line; do ;; AGPL-3.0) - case $pkg in + case ${pkg} in github.com/edgelesssys/constellation/v2) ;; *) @@ -46,7 +47,7 @@ while read line; do ;; Unknown) - case $pkg in + case ${pkg} in *) not_allowed ;; @@ -54,11 +55,11 @@ while read line; do ;; *) - echo "unknown license: $line" + echo "unknown license: ${line}" err=1 ;; esac done -exit $err +exit "${err}" } diff --git a/hack/fetch-broken-e2e/fetch.sh b/hack/fetch-broken-e2e/fetch.sh index 7b3835e17..3915e532c 100755 --- a/hack/fetch-broken-e2e/fetch.sh +++ b/hack/fetch-broken-e2e/fetch.sh @@ -1,21 +1,22 @@ #!/usr/bin/env bash set -euo pipefail +shopt -s inherit_errexit LATEST_AZURE_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test Azure' --json databaseId -q '.[].databaseId') -echo $LATEST_AZURE_RUNS -for RUN_ID in $LATEST_AZURE_RUNS +echo "${LATEST_AZURE_RUNS}" +for RUN_ID in ${LATEST_AZURE_RUNS} do # Might fail, because no state was written, because e2e pipeline failed early # Or, because state was downloaded by earlier run of this script - gh run download ${RUN_ID} -R edgelesssys/constellation -n constellation-state.json -D azure/${RUN_ID} || true + gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D azure/"${RUN_ID}" || true done LATEST_GCP_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test GCP' --json databaseId -q '.[].databaseId') -echo $LATEST_GCP_RUNS -for RUN_ID in $LATEST_GCP_RUNS +echo "${LATEST_GCP_RUNS}" +for RUN_ID in ${LATEST_GCP_RUNS} do # Might fail, because no state was written, because e2e pipeline failed early # Or, because state was downloaded by earlier run of this script - gh run download ${RUN_ID} -R 
edgelesssys/constellation -n constellation-state.json -D gcp/${RUN_ID} || true + gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D gcp/"${RUN_ID}" || true done diff --git a/hack/fetch-broken-e2e/find.sh b/hack/fetch-broken-e2e/find.sh index e0755a37f..41ff88637 100755 --- a/hack/fetch-broken-e2e/find.sh +++ b/hack/fetch-broken-e2e/find.sh @@ -1,13 +1,14 @@ #!/usr/bin/env bash set -euo pipefail +shopt -s inherit_errexit TO_DELETE=$(grep -lr "\"uid\": \"${1}\"" . || true) -if [ -z "$TO_DELETE" ] +if [[ -z "${TO_DELETE}" ]] then - printf "Unable to find '${1}'\n" + printf "Unable to find '%s'\n" "${1}" else printf "Statefile found. You should run:\n\n" - printf "cd %s\n" $TO_DELETE + printf "cd %s\n" "${TO_DELETE}" printf "constellation terminate --yes\n\n" fi diff --git a/hack/importAzure.sh b/hack/importAzure.sh index 96ca961be..ee4bf3271 100755 --- a/hack/importAzure.sh +++ b/hack/importAzure.sh @@ -15,6 +15,7 @@ # * AZURE_IMAGE_NAME: (optional, default: upload-target) Temporary image used for upload, must not exist. set -euo pipefail +shopt -s inherit_errexit # Required tools if ! command -v az &> /dev/null @@ -71,8 +72,8 @@ echo "AZURE_SKU=${AZURE_SKU}" echo "AZURE_SECURITY_TYPE=${AZURE_SECURITY_TYPE}" echo "" -read -p "Continue (y/n)?" choice -case "$choice" in +read -r -p "Continue (y/n)?" choice +case "${choice}" in y|Y ) echo "Starting import...";; n|N ) echo "Abort!"; exit 1;; * ) echo "invalid"; exit 1;; @@ -80,41 +81,96 @@ esac echo "Preparing to upload '${AZURE_IMAGE_FILE} to Azure." -SIZE=$(wc -c ${AZURE_IMAGE_FILE} | cut -d " " -f1) +SIZE=$(wc -c "${AZURE_IMAGE_FILE}" | cut -d " " -f1) echo "Size is ${SIZE} bytes." echo "Creating disk (${AZURE_IMAGE_NAME}) as import target." 
-az disk create -n ${AZURE_IMAGE_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} -l ${AZURE_REGION} --hyper-v-generation V2 --os-type Linux --for-upload --upload-size-bytes ${SIZE} --sku standard_lrs +az disk create \ + -n "${AZURE_IMAGE_NAME}" \ + -g "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + --hyper-v-generation V2 \ + --os-type Linux \ + --for-upload \ + --upload-size-bytes "${SIZE}" \ + --sku standard_lrs echo "Waiting for disk to be created." -az disk wait --created -n ${AZURE_IMAGE_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} +az disk wait --created -n "${AZURE_IMAGE_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" echo "Retrieving disk ID." -AZURE_DISK_ID=$(az disk list --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output json | jq -r) +AZURE_DISK_ID=$(az disk list \ + --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \ + --output json \ + | jq -r \ +) echo "Disk ID is ${AZURE_DISK_ID}" echo "Generating SAS URL for authorized upload." -AZURE_SAS_URL=$(az disk grant-access -n ${AZURE_IMAGE_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} --access-level Write --duration-in-seconds 86400 | jq -r .accessSas) +AZURE_SAS_URL=$(az disk grant-access \ + -n "${AZURE_IMAGE_NAME}" \ + -g "${AZURE_RESOURCE_GROUP_NAME}" \ + --access-level Write \ + --duration-in-seconds 86400 \ + | jq -r .accessSas \ +) echo "Uploading image file to Azure disk." -azcopy copy ${AZURE_IMAGE_FILE} ${AZURE_SAS_URL} --blob-type PageBlob +azcopy copy "${AZURE_IMAGE_FILE}" "${AZURE_SAS_URL}" --blob-type PageBlob echo "Finalizing upload." -az disk revoke-access -n ${AZURE_IMAGE_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} +az disk revoke-access -n "${AZURE_IMAGE_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" echo "Creating Azure image."
-az image create -g ${AZURE_RESOURCE_GROUP_NAME} -l ${AZURE_REGION} -n ${AZURE_IMAGE_NAME} --hyper-v-generation V2 --os-type Linux --source ${AZURE_DISK_ID} +az image create \ + -g "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + -n "${AZURE_IMAGE_NAME}" \ + --hyper-v-generation V2 \ + --os-type Linux \ + --source "${AZURE_DISK_ID}" echo "Creating Azure Shared Image Gallery." -az sig create -l ${AZURE_REGION} --gallery-name ${AZURE_GALLERY_NAME} --resource-group ${AZURE_RESOURCE_GROUP_NAME} +az sig create \ + -l "${AZURE_REGION}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" echo "Creating Image Definition." -az sig image-definition create --resource-group ${AZURE_RESOURCE_GROUP_NAME} -l ${AZURE_REGION} --gallery-name ${AZURE_GALLERY_NAME} --gallery-image-definition ${AZURE_IMAGE_DEFINITION} --publisher ${AZURE_PUBLISHER} --offer ${AZURE_IMAGE_OFFER} --sku ${AZURE_SKU} --os-type Linux --os-state generalized --hyper-v-generation V2 --features SecurityType=${AZURE_SECURITY_TYPE} +az sig image-definition create \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ + --publisher "${AZURE_PUBLISHER}" \ + --offer "${AZURE_IMAGE_OFFER}" --sku "${AZURE_SKU}" \ + --os-type Linux \ + --os-state generalized \ + --hyper-v-generation V2 \ + --features SecurityType="${AZURE_SECURITY_TYPE}" echo "Retrieving temporary image ID." -AZURE_IMAGE_ID=$(az image list --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output json | jq -r) +AZURE_IMAGE_ID=$(az image list \ + --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \ + --output json | jq -r \ +) echo "Creating final image version." 
-az sig image-version create --resource-group ${AZURE_RESOURCE_GROUP_NAME} -l ${AZURE_REGION} --gallery-name ${AZURE_GALLERY_NAME} --gallery-image-definition ${AZURE_IMAGE_DEFINITION} --gallery-image-version ${AZURE_IMAGE_VERSION} --target-regions ${AZURE_REGION} --replica-count 1 --managed-image ${AZURE_IMAGE_ID} +az sig image-version create \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ + --gallery-image-version "${AZURE_IMAGE_VERSION}" \ + --target-regions "${AZURE_REGION}" \ + --replica-count 1 \ + --managed-image "${AZURE_IMAGE_ID}" echo "Cleaning up ephemeral resources." -az image delete --ids ${AZURE_IMAGE_ID} -az disk delete -y --ids ${AZURE_DISK_ID} +az image delete --ids "${AZURE_IMAGE_ID}" +az disk delete -y --ids "${AZURE_DISK_ID}" -IMAGE_VERSION=$(az sig image-version show --resource-group ${AZURE_RESOURCE_GROUP_NAME} --gallery-name ${AZURE_GALLERY_NAME} --gallery-image-definition ${AZURE_IMAGE_DEFINITION} --gallery-image-version ${AZURE_IMAGE_VERSION} -o tsv --query id) +IMAGE_VERSION=$(az sig image-version show \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + --gallery-name "${AZURE_GALLERY_NAME}" \ + --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ + --gallery-image-version "${AZURE_IMAGE_VERSION}" \ + -o tsv \ + --query id \ +) echo "Image ID is ${IMAGE_VERSION}" # # Cleanup all diff --git a/image/measured-boot/measure_util.sh b/image/measured-boot/measure_util.sh index 3713806c9..10f084b4a 100644 --- a/image/measured-boot/measure_util.sh +++ b/image/measured-boot/measure_util.sh @@ -5,11 +5,14 @@ # This script contains shared functions for pcr calculation. 
+set -euo pipefail +shopt -s inherit_errexit + pcr_extend() { local CURRENT_PCR="$1" local EXTEND_WITH="$2" local HASH_FUNCTION="$3" - ( echo -n "$CURRENT_PCR" | xxd -r -p ; echo -n "$EXTEND_WITH" | xxd -r -p; ) | ${HASH_FUNCTION} | cut -d " " -f 1 + ( echo -n "${CURRENT_PCR}" | xxd -r -p ; echo -n "${EXTEND_WITH}" | xxd -r -p; ) | ${HASH_FUNCTION} | cut -d " " -f 1 } extract () { diff --git a/image/measured-boot/precalculate_pcr_4.sh b/image/measured-boot/precalculate_pcr_4.sh index d2d716ff4..2253be4aa 100755 --- a/image/measured-boot/precalculate_pcr_4.sh +++ b/image/measured-boot/precalculate_pcr_4.sh @@ -7,6 +7,7 @@ # Usage: precalculate_pcr_4.sh set -euo pipefail +shopt -s inherit_errexit source "$(dirname "$0")/measure_util.sh" ev_efi_action_sha256=3d6772b4f84ed47595d72a2c4c5ffd15f5bb72c7507fe26f2aaee2c69d5633ba @@ -46,8 +47,8 @@ trap 'cleanup "${DIR}"' EXIT extract "$1" "/efi/EFI/BOOT/BOOTX64.EFI" "${DIR}/01-shim.efi" extract "$1" "/efi/EFI/BOOT/grubx64.efi" "${DIR}/02-sd-boot.efi" extract "$1" "/efi/EFI/Linux" "${DIR}/uki" -sudo chown -R "$USER:$USER" "${DIR}/uki" -cp ${DIR}/uki/*.efi "${DIR}/03-uki.efi" +sudo chown -R "${USER}:${USER}" "${DIR}/uki" +cp "${DIR}"/uki/*.efi "${DIR}/03-uki.efi" shim_authentihash=$(authentihash "${DIR}/01-shim.efi") sd_boot_authentihash=$(authentihash "${DIR}/02-sd-boot.efi") diff --git a/image/measured-boot/precalculate_pcr_8.sh b/image/measured-boot/precalculate_pcr_8.sh index a870206a4..ad395cd82 100755 --- a/image/measured-boot/precalculate_pcr_8.sh +++ b/image/measured-boot/precalculate_pcr_8.sh @@ -10,6 +10,7 @@ # Usage: precalculate_pcr_8.sh set -euo pipefail +shopt -s inherit_errexit source "$(dirname "$0")/measure_util.sh" get_cmdline_from_uki () { @@ -20,7 +21,8 @@ get_cmdline_from_uki () { cmdline_measure () { local path="$1" - local tmp=$(mktemp) + local tmp + tmp=$(mktemp) # convert to utf-16le and add a null terminator iconv -f utf-8 -t utf-16le "${path}" -o "${tmp}" truncate -s +2 "${tmp}" @@ -46,8 +48,8 @@ 
DIR=$(mktempdir) trap 'cleanup "${DIR}"' EXIT extract "${IMAGE}" "/efi/EFI/Linux" "${DIR}/uki" -sudo chown -R "$USER:$USER" "${DIR}/uki" -cp ${DIR}/uki/*.efi "${DIR}/03-uki.efi" +sudo chown -R "${USER}:${USER}" "${DIR}/uki" +cp "${DIR}"/uki/*.efi "${DIR}/03-uki.efi" get_cmdline_from_uki "${DIR}/03-uki.efi" "${DIR}/cmdline" cmdline=$(cat "${DIR}/cmdline") @@ -56,7 +58,7 @@ cleanup "${DIR}" expected_pcr_8=0000000000000000000000000000000000000000000000000000000000000000 expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum") -if [ "${CSP}" == "azure" ]; then +if [[ "${CSP}" == "azure" ]]; then # Azure displays the boot menu # triggering an extra measurement of the kernel command line. expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum") diff --git a/image/measured-boot/precalculate_pcr_9.sh b/image/measured-boot/precalculate_pcr_9.sh index 0ba09dd1b..df4ff9078 100755 --- a/image/measured-boot/precalculate_pcr_9.sh +++ b/image/measured-boot/precalculate_pcr_9.sh @@ -8,6 +8,8 @@ # Usage: precalculate_pcr_9.sh set -euo pipefail +shopt -s inherit_errexit + source "$(dirname "$0")/measure_util.sh" get_initrd_from_uki () { @@ -36,8 +38,8 @@ DIR=$(mktempdir) trap 'cleanup "${DIR}"' EXIT extract "$1" "/efi/EFI/Linux" "${DIR}/uki" -sudo chown -R "$USER:$USER" "${DIR}/uki" -cp ${DIR}/uki/*.efi "${DIR}/03-uki.efi" +sudo chown -R "${USER}:${USER}" "${DIR}/uki" +cp "${DIR}"/uki/*.efi "${DIR}/03-uki.efi" get_initrd_from_uki "${DIR}/03-uki.efi" "${DIR}/initrd" initrd_hash=$(initrd_measure "${DIR}/initrd") diff --git a/image/mkosi.skeleton/etc/profile.d/constellation.sh b/image/mkosi.skeleton/etc/profile.d/constellation.sh index 0109d7a0f..c3dab9e8d 100755 --- a/image/mkosi.skeleton/etc/profile.d/constellation.sh +++ b/image/mkosi.skeleton/etc/profile.d/constellation.sh @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: AGPL-3.0-only +set -euo pipefail +shopt -s inherit_errexit + export PATH=/run/state/bin:${PATH} export 
KUBECONFIG=/etc/kubernetes/admin.conf alias k=kubectl diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh index b04f47ec0..d77dbf460 100755 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/azure-provisioning.sh @@ -1,15 +1,18 @@ #!/usr/bin/env bash # source https://learn.microsoft.com/en-us/azure/virtual-machines/linux/no-agent +set -euo pipefail +shopt -s inherit_errexit + attempts=1 -until [ "$attempts" -gt 5 ] +until [[ "${attempts}" -gt 5 ]] do - echo "obtaining goal state - attempt $attempts" + echo "obtaining goal state - attempt ${attempts}" goalstate=$(curl --fail -v -X 'GET' -H "x-ms-agent-name: azure-vm-register" \ -H "Content-Type: text/xml;charset=utf-8" \ -H "x-ms-version: 2012-11-30" \ "http://168.63.129.16/machine/?comp=goalstate") - if [ $? -eq 0 ] + if [[ $? 
-eq 0 ]] then echo "successfully retrieved goal state" retrieved_goal_state=true @@ -19,24 +22,24 @@ do attempts=$((attempts+1)) done -if [ "$retrieved_goal_state" != "true" ] +if [[ "${retrieved_goal_state}" != "true" ]] then echo "failed to obtain goal state - cannot register this VM" exit 1 fi -container_id=$(grep ContainerId <<< "$goalstate" | sed 's/\s*<\/*ContainerId>//g' | sed 's/\r$//') -instance_id=$(grep InstanceId <<< "$goalstate" | sed 's/\s*<\/*InstanceId>//g' | sed 's/\r$//') +container_id=$(grep ContainerId <<< "${goalstate}" | sed 's/\s*<\/*ContainerId>//g' | sed 's/\r$//') +instance_id=$(grep InstanceId <<< "${goalstate}" | sed 's/\s*<\/*InstanceId>//g' | sed 's/\r$//') ready_doc=$(cat << EOF 1 - $container_id + ${container_id} - $instance_id + ${instance_id} Ready @@ -48,15 +51,15 @@ EOF ) attempts=1 -until [ "$attempts" -gt 5 ] +until [[ "${attempts}" -gt 5 ]] do - echo "registering with Azure - attempt $attempts" + echo "registering with Azure - attempt ${attempts}" curl --fail -v -X 'POST' -H "x-ms-agent-name: azure-vm-register" \ -H "Content-Type: text/xml;charset=utf-8" \ -H "x-ms-version: 2012-11-30" \ - -d "$ready_doc" \ + -d "${ready_doc}" \ "http://168.63.129.16/machine?comp=health" - if [ $? -eq 0 ] + if [[ $? 
-eq 0 ]] then echo "successfully register with Azure" break diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh index bc17f2bbf..064218bba 100755 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/38azure-provision/module-setup.sh @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: AGPL-3.0-only +set -euo pipefail +shopt -s inherit_errexit + depends() { echo systemd } @@ -10,8 +13,8 @@ depends() { install_and_enable_unit() { unit="$1"; shift target="$1"; shift - inst_simple "$moddir/$unit" "$systemdsystemunitdir/$unit" - mkdir -p "${initdir}${systemdsystemconfdir}/${target}.wants" + inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" + mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" ln_r "${systemdsystemunitdir}/${unit}" \ "${systemdsystemconfdir}/${target}.wants/${unit}" } @@ -23,7 +26,7 @@ install() { grep \ sed - inst_script "$moddir/azure-provisioning.sh" \ + inst_script "${moddir}/azure-provisioning.sh" \ "/usr/local/bin/azure-provisioning" install_and_enable_unit "azure-provisioning.service" \ "basic.target" diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh index 4533bf4bd..2586d4c49 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/aws-nvme-disk.sh @@ -4,22 +4,22 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail -shopt -s extglob nullglob +shopt -s extglob nullglob inherit_errexit AWS_STATE_DISK_DEVICENAME="sdb" AWS_STATE_DISK_SYMLINK="/dev/${AWS_STATE_DISK_DEVICENAME}" # hack: aws nvme udev rules are never executed. 
Create symlinks for the nvme devices manually. -while [ ! -L "${AWS_STATE_DISK_SYMLINK}" ] +while [[ ! -L "${AWS_STATE_DISK_SYMLINK}" ]] do for nvmedisk in /dev/nvme*n1 do linkname=$(nvme amzn id-ctrl -b "${nvmedisk}" | tail -c +3072 | tr -d ' ') || true - if [ -n "${linkname}" ] && [ "${linkname}" == "${AWS_STATE_DISK_DEVICENAME}" ]; then + if [[ -n "${linkname}" ]] && [[ "${linkname}" == "${AWS_STATE_DISK_DEVICENAME}" ]]; then ln -s "${nvmedisk}" "${AWS_STATE_DISK_SYMLINK}" fi done - if [ -L "${AWS_STATE_DISK_SYMLINK}" ]; then + if [[ -L "${AWS_STATE_DISK_SYMLINK}" ]]; then break fi echo "Waiting for state disk to appear.." @@ -27,4 +27,4 @@ do done echo "AWS state disk found" -echo ${AWS_STATE_DISK_SYMLINK} → $(readlink -f "${AWS_STATE_DISK_SYMLINK}") +echo "${AWS_STATE_DISK_SYMLINK}" → "$(readlink -f "${AWS_STATE_DISK_SYMLINK}")" diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh index 947f5aa01..b86db8191 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/module-setup.sh @@ -11,8 +11,8 @@ depends() { install_and_enable_unit() { unit="$1"; shift target="$1"; shift - inst_simple "$moddir/$unit" "$systemdsystemunitdir/$unit" - mkdir -p "${initdir}${systemdsystemconfdir}/${target}.wants" + inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}" + mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants" ln_r "${systemdsystemunitdir}/${unit}" \ "${systemdsystemconfdir}/${target}.wants/${unit}" } @@ -28,7 +28,7 @@ install() { inst_script "/usr/sbin/disk-mapper" \ "/usr/sbin/disk-mapper" - inst_script "$moddir/prepare-state-disk.sh" \ + inst_script "${moddir}/prepare-state-disk.sh" \ "/usr/sbin/prepare-state-disk" install_and_enable_unit "prepare-state-disk.service" \ "basic.target" @@ -61,7 +61,7 
@@ install() { "/usr/lib/udev/google_nvme_id" inst_rules "64-gce-disk-removal.rules" "65-gce-disk-naming.rules" - inst_script "$moddir/aws-nvme-disk.sh" \ + inst_script "${moddir}/aws-nvme-disk.sh" \ "/usr/sbin/aws-nvme-disk" install_and_enable_unit "aws-nvme-disk.service" \ "basic.target" @@ -72,5 +72,5 @@ install() { /etc/pki/tls/certs/ca-bundle.crt # backport of https://github.com/dracutdevs/dracut/commit/dcbe23c14d13ca335ad327b7bb985071ca442f12 - inst_simple "$moddir/sysusers-dracut.conf" "$systemdsystemunitdir/systemd-sysusers.service.d/sysusers-dracut.conf" + inst_simple "${moddir}/sysusers-dracut.conf" "${systemdsystemunitdir}/systemd-sysusers.service.d/sysusers-dracut.conf" } diff --git a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh index 983d80af0..424514802 100644 --- a/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh +++ b/image/mkosi.skeleton/usr/lib/dracut/modules.d/39constellation-mount/prepare-state-disk.sh @@ -4,6 +4,7 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail +shopt -s inherit_errexit # Prepare the encrypted volume by either initializing it with a random key or by aquiring the key from another bootstrapper. 
# Store encryption key (random or recovered key) in /run/cryptsetup-keys.d/state.key diff --git a/image/mkosi.skeleton/usr/lib/udev/google_nvme_id b/image/mkosi.skeleton/usr/lib/udev/google_nvme_id index 4f8c43f40..e8933e8c7 100755 --- a/image/mkosi.skeleton/usr/lib/udev/google_nvme_id +++ b/image/mkosi.skeleton/usr/lib/udev/google_nvme_id @@ -22,7 +22,6 @@ readonly nvme_cli_bin=/usr/sbin/nvme # Bash regex to parse device paths and controller identification readonly NAMESPACE_NUMBER_REGEX="/dev/nvme[[:digit:]]+n([[:digit:]]+).*" readonly PARTITION_NUMBER_REGEX="/dev/nvme[[:digit:]]+n[[:digit:]]+p([[:digit:]]+)" -readonly PD_NVME_REGEX="sn[[:space:]]+:[[:space]]+nvme_card-pd" # Globals used to generate the symlinks for a PD-NVMe disk. These are populated # by the identify_pd_disk function and exported for consumption by udev rules. @@ -55,26 +54,26 @@ function err() { ####################################### function get_namespace_device_name() { local nvme_json - nvme_json="$("$nvme_cli_bin" id-ns -b "$1" | xxd -p -seek 384 | xxd -p -r)" + nvme_json="$("${nvme_cli_bin}" id-ns -b "$1" | xxd -p -seek 384 | xxd -p -r)" if [[ $? 
-ne 0 ]]; then return 1 fi - if [[ -z "$nvme_json" ]]; then + if [[ -z "${nvme_json}" ]]; then err "NVMe Vendor Extension disk information not present" return 1 fi local device_name - device_name="$(echo "$nvme_json" | grep device_name | sed -e 's/.*"device_name":[ \t]*"\([a-zA-Z0-9_-]\+\)".*/\1/')" + device_name="$(echo "${nvme_json}" | grep device_name | sed -e 's/.*"device_name":[ \t]*"\([a-zA-Z0-9_-]\+\)".*/\1/')" # Error if our device name is empty - if [[ -z "$device_name" ]]; then + if [[ -z "${device_name}" ]]; then err "Empty name" return 1 fi - echo "$device_name" + echo "${device_name}" return 0 } @@ -92,13 +91,13 @@ function get_namespace_device_name() { function get_namespace_number() { local dev_path="$1" local namespace_number - if [[ "$dev_path" =~ $NAMESPACE_NUMBER_REGEX ]]; then + if [[ "${dev_path}" =~ ${NAMESPACE_NUMBER_REGEX} ]]; then namespace_number="${BASH_REMATCH[1]}" else return 1 fi - echo "$namespace_number" + echo "${namespace_number}" return 0 } @@ -115,9 +114,9 @@ function get_namespace_number() { function get_partition_number() { local dev_path="$1" local partition_number - if [[ "$dev_path" =~ $PARTITION_NUMBER_REGEX ]]; then + if [[ "${dev_path}" =~ ${PARTITION_NUMBER_REGEX} ]]; then partition_number="${BASH_REMATCH[1]}" - echo "$partition_number" + echo "${partition_number}" else echo '' fi @@ -134,12 +133,13 @@ function get_partition_number() { ####################################### function gen_symlink() { local dev_path="$1" - local partition_number="$(get_partition_number "$dev_path")" + local partition_number + partition_number="$(get_partition_number "${dev_path}")" - if [[ -n "$partition_number" ]]; then - ln -s "$dev_path" /dev/disk/by-id/google-"$ID_SERIAL_SHORT"-part"$partition_number" > /dev/null 2>&1 + if [[ -n "${partition_number}" ]]; then + ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}"-part"${partition_number}" > /dev/null 2>&1 else - ln -s "$dev_path" /dev/disk/by-id/google-"$ID_SERIAL_SHORT" > 
/dev/null 2>&1 + ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}" > /dev/null 2>&1 fi return 0 @@ -157,12 +157,12 @@ function gen_symlink() { function identify_pd_disk() { local dev_path="$1" local dev_name - dev_name="$(get_namespace_device_name "$dev_path")" + dev_name="$(get_namespace_device_name "${dev_path}")" if [[ $? -ne 0 ]]; then return 1 fi - ID_SERIAL_SHORT="$dev_name" + ID_SERIAL_SHORT="${dev_name}" ID_SERIAL="Google_PersistentDisk_${ID_SERIAL_SHORT}" return 0 } @@ -181,8 +181,8 @@ function main() { local device_path='' while getopts :d:sh flag; do - case "$flag" in - d) device_path="$OPTARG";; + case "${flag}" in + d) device_path="${OPTARG}";; s) opt_gen_symlink='true';; h) print_help_message return 0 @@ -194,13 +194,13 @@ function main() { esac done - if [[ -z "$device_path" ]]; then + if [[ -z "${device_path}" ]]; then echo "Device path (-d) argument required. Use -h for full usage." 1>&2 exit 1 fi # Ensure the nvme-cli command is installed - command -v "$nvme_cli_bin" > /dev/null 2>&1 + command -v "${nvme_cli_bin}" > /dev/null 2>&1 if [[ $? -ne 0 ]]; then err "The nvme utility (/usr/sbin/nvme) was not found. You may need to run \ with sudo or install nvme-cli." @@ -208,7 +208,7 @@ with sudo or install nvme-cli." fi # Ensure the passed device is actually an NVMe device - "$nvme_cli_bin" id-ctrl "$device_path" &>/dev/null + "${nvme_cli_bin}" id-ctrl "${device_path}" &>/dev/null if [[ $? -ne 0 ]]; then err "Passed device was not an NVMe device. (You may need to run this \ script as root/with sudo)." @@ -217,22 +217,22 @@ script as root/with sudo)." # Detect the type of attached nvme device local controller_id - controller_id=$("$nvme_cli_bin" id-ctrl "$device_path") - if [[ ! "$controller_id" =~ nvme_card-pd ]] ; then + controller_id=$("${nvme_cli_bin}" id-ctrl "${device_path}") + if [[ ! 
"${controller_id}" =~ nvme_card-pd ]] ; then err "Device is not a PD-NVMe device" return 1 fi # Fill the global variables for the id command for the given disk type # Error messages will be printed closer to error, no need to reprint here - identify_pd_disk "$device_path" + identify_pd_disk "${device_path}" if [[ $? -ne 0 ]]; then return $? fi # Gen symlinks or print out the globals set by the identify command - if [[ "$opt_gen_symlink" == 'true' ]]; then - gen_symlink "$device_path" + if [[ "${opt_gen_symlink}" == 'true' ]]; then + gen_symlink "${device_path}" else # These will be consumed by udev echo "ID_SERIAL_SHORT=${ID_SERIAL_SHORT}" diff --git a/image/secure-boot/aws/create_uefivars.sh b/image/secure-boot/aws/create_uefivars.sh index af78d6f54..982ec15bc 100755 --- a/image/secure-boot/aws/create_uefivars.sh +++ b/image/secure-boot/aws/create_uefivars.sh @@ -1,9 +1,11 @@ #!/usr/bin/env bash + set -euo pipefail +shopt -s inherit_errexit TMPDIR=$(mktemp -d /tmp/uefivars-XXXXXXXXXXXXXX) -git clone https://github.com/awslabs/python-uefivars ${TMPDIR} +git clone https://github.com/awslabs/python-uefivars "${TMPDIR}" -"${TMPDIR}/uefivars.py" -i none -o aws -O "$1" -P ${PKI}/PK.esl -K ${PKI}/KEK.esl --db ${PKI}/db.esl +"${TMPDIR}/uefivars.py" -i none -o aws -O "$1" -P "${PKI}"/PK.esl -K "${PKI}"/KEK.esl --db "${PKI}"/db.esl rm -rf "${TMPDIR}" diff --git a/image/secure-boot/azure/delete.sh b/image/secure-boot/azure/delete.sh index cb11ad9d4..f98843144 100755 --- a/image/secure-boot/azure/delete.sh +++ b/image/secure-boot/azure/delete.sh @@ -1,7 +1,10 @@ #!/usr/bin/env bash -set -euo pipefail -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +set -euo pipefail +shopt -s inherit_errexit + +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then + # shellcheck source=/dev/null . 
"${CONFIG_FILE}" fi POSITIONAL_ARGS=() @@ -13,7 +16,7 @@ while [[ $# -gt 0 ]]; do shift # past argument shift # past value ;; - -*|--*) + -*) echo "Unknown option $1" exit 1 ;; @@ -32,7 +35,7 @@ NIC_INFO=$(az network nic show --ids "${NIC}" -o json) PUBIP=$(echo "${NIC_INFO}" | jq -r '.ipConfigurations[0].publicIpAddress.id') NSG=$(echo "${NIC_INFO}" | jq -r '.networkSecurityGroup.id') SUBNET=$(echo "${NIC_INFO}" | jq -r '.ipConfigurations[0].subnet.id') -VNET=$(echo $SUBNET | sed 's#/subnets/.*##') +VNET=${SUBNET%%/subnets/*} DISK=$(echo "${AZ_VM_INFO}" | jq -r '.storageProfile.osDisk.managedDisk.id') diff --git a/image/secure-boot/azure/extract_vmgs.sh b/image/secure-boot/azure/extract_vmgs.sh index f5f2089ad..cc1666f4c 100755 --- a/image/secure-boot/azure/extract_vmgs.sh +++ b/image/secure-boot/azure/extract_vmgs.sh @@ -1,7 +1,10 @@ #!/usr/bin/env bash -set -euo pipefail -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +set -euo pipefail +shopt -s inherit_errexit + +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then + # shellcheck source=/dev/null . "${CONFIG_FILE}" fi AZURE_SUBSCRIPTION=$(az account show --query id -o tsv) @@ -14,7 +17,7 @@ while [[ $# -gt 0 ]]; do shift # past argument shift # past value ;; - -*|--*) + -*) echo "Unknown option $1" exit 1 ;; diff --git a/image/secure-boot/azure/launch.sh b/image/secure-boot/azure/launch.sh index 6ddc84d60..be0e6a2ac 100755 --- a/image/secure-boot/azure/launch.sh +++ b/image/secure-boot/azure/launch.sh @@ -1,7 +1,10 @@ #!/usr/bin/env bash -set -euo pipefail -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +set -euo pipefail +shopt -s inherit_errexit + +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then + # shellcheck source=/dev/null . 
"${CONFIG_FILE}" fi POSITIONAL_ARGS=() @@ -31,7 +34,7 @@ while [[ $# -gt 0 ]]; do shift # past argument shift # past value ;; - -*|--*) + -*) echo "Unknown option $1" exit 1 ;; @@ -54,10 +57,10 @@ else fi create_vm_from_disk () { - AZURE_DISK_REFERENCE=$(az disk show --resource-group ${AZURE_RESOURCE_GROUP_NAME} --name ${AZURE_DISK_NAME} --query id -o tsv) + AZURE_DISK_REFERENCE=$(az disk show --resource-group "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_DISK_NAME}" --query id -o tsv) az vm create --name "${AZURE_VM_NAME}" \ --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ - -l ${AZURE_REGION} \ + -l "${AZURE_REGION}" \ --size "${VMSIZE}" \ --public-ip-sku Standard \ --os-type Linux \ @@ -79,7 +82,7 @@ create_vm_from_sig () { --query id -o tsv) az vm create --name "${AZURE_VM_NAME}" \ --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ - -l ${AZURE_REGION} \ + -l "${AZURE_REGION}" \ --size "${VMSIZE}" \ --public-ip-sku Standard \ --image "${AZURE_IMAGE_REFERENCE}" \ @@ -91,7 +94,7 @@ create_vm_from_sig () { --no-wait } -if [ "$CREATE_FROM_GALLERY" = "YES" ]; then +if [[ "${CREATE_FROM_GALLERY}" = "YES" ]]; then create_vm_from_sig else create_vm_from_disk diff --git a/image/secure-boot/generate_nvram_vars.sh b/image/secure-boot/generate_nvram_vars.sh index 457cf1426..f1ccffc89 100755 --- a/image/secure-boot/generate_nvram_vars.sh +++ b/image/secure-boot/generate_nvram_vars.sh @@ -3,6 +3,9 @@ # # SPDX-License-Identifier: AGPL-3.0-only +set -euo pipefail +shopt -s inherit_errexit + SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) BASE_DIR=$(realpath "${SCRIPT_DIR}/..") @@ -14,7 +17,7 @@ fi libvirt_nvram_gen () { local image_path="${1}" if test -f "${BASE_DIR}/image.nvram.template"; then - echo "NVRAM template already generated: $(realpath "--relative-to=$(pwd)" ${BASE_DIR}/image.nvram.template)" + echo "NVRAM template already generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" return fi if ! 
test -f "${image_path}"; then @@ -36,7 +39,7 @@ libvirt_nvram_gen () { # generate nvram file using libvirt virt-install --name constell-nvram-gen \ - --connect ${LIBVIRT_SOCK} \ + --connect "${LIBVIRT_SOCK}" \ --nonetworks \ --description 'Constellation' \ --ram 1024 \ @@ -80,13 +83,13 @@ libvirt_nvram_gen () { echo -e ' Reboot and continue this script.' echo -e '' echo -e 'Press ENTER to continue after you followed one of the guides from above.' - read + read -r sudo cp "${BASE_DIR}/image.nvram" "${BASE_DIR}/image.nvram.template" virsh --connect "${LIBVIRT_SOCK}" destroy --domain constell-nvram-gen virsh --connect "${LIBVIRT_SOCK}" undefine --nvram constell-nvram-gen rm -f "${BASE_DIR}/image.nvram" - echo "NVRAM template generated: $(realpath "--relative-to=$(pwd)" ${BASE_DIR}/image.nvram.template)" + echo "NVRAM template generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)" } -libvirt_nvram_gen $1 +libvirt_nvram_gen "$1" diff --git a/image/secure-boot/genkeys.sh b/image/secure-boot/genkeys.sh index 9ea35696e..7fcc37fc0 100755 --- a/image/secure-boot/genkeys.sh +++ b/image/secure-boot/genkeys.sh @@ -9,27 +9,30 @@ # Release images are signed using a different set of keys. # Set PKI to an empty folder and PKI_SET to "dev". +set -euo pipefail +shopt -s inherit_errexit + SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) TEMPLATES=${SCRIPT_DIR}/templates BASE_DIR=$(realpath "${SCRIPT_DIR}/..") -if [ -z "${PKI}" ]; then +if [[ -z "${PKI-}" ]]; then PKI=${BASE_DIR}/pki fi -if [ -z "${PKI_SET}" ]; then +if [[ -z "${PKI_SET-}" ]]; then PKI_SET=dev fi gen_pki () { # Only use for non-production images. # Use real PKI for production images instead. 
- count=$(ls -1 ${PKI}/*.{key,crt,cer,esl,auth} 2>/dev/null | wc -l) - if [ $count != 0 ] + count=$(find "${PKI}" -maxdepth 1 \( -name '*.key' -o -name '*.crt' -o -name '*.cer' -o -name '*.esl' -o -name '*.auth' \) 2>/dev/null | wc -l) + if [[ "${count}" != 0 ]] then - echo PKI files $(ls -1 $(realpath "--relative-to=$(pwd)" ${PKI})/*.{key,crt,cer,esl,auth}) already exist + echo PKI files "$(ls -1 "$(realpath "--relative-to=$(pwd)" "${PKI}")"/*.{key,crt,cer,esl,auth})" already exist return fi mkdir -p "${PKI}" - pushd "${PKI}" + pushd "${PKI}" || exit 1 uuid=$(systemd-id128 new --uuid) for key in PK KEK db; do @@ -60,7 +63,7 @@ gen_pki () { sign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth sign-efi-sig-list -c KEK.crt -k KEK.key db db.esl db.auth - popd + popd || exit 1 } # gen_pki generates a PKI for testing purposes only. diff --git a/image/secure-boot/signed-shim.sh b/image/secure-boot/signed-shim.sh index de90d410a..8cdd27276 100755 --- a/image/secure-boot/signed-shim.sh +++ b/image/secure-boot/signed-shim.sh @@ -3,9 +3,11 @@ # # SPDX-License-Identifier: AGPL-3.0-only -set -euo pipefail # This script is used to add a signed shim to the image.raw file EFI partition after running `mkosi build`. +set -euo pipefail +shopt -s inherit_errexit + if (( $# != 1 )) then echo "Usage: $0 " @@ -16,8 +18,6 @@ fi SOURCE=https://kojipkgs.fedoraproject.org/packages/shim/15.6/2/x86_64/shim-x64-15.6-2.x86_64.rpm # EXPECTED_SHA512 is the SHA512 checksum of the signed shim RPM EXPECTED_SHA512=971978bddee95a6a134ef05c4d88cf5df41926e631de863b74ef772307f3e106c82c8f6889c18280d47187986abd774d8671c5be4b85b1b0bb3d1858b65d02cf -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) -BASE_DIR=$(realpath "${SCRIPT_DIR}/..") TMPDIR=$(mktemp -d) pushd "${TMPDIR}" @@ -26,7 +26,7 @@ curl -sL -o shim.rpm "${SOURCE}" echo "Checking SHA512 checksum of signed shim..." 
sha512sum -c <<< "${EXPECTED_SHA512} shim.rpm" rpm2cpio shim.rpm | cpio -idmv -echo $TMPDIR +echo "${TMPDIR}" popd @@ -45,5 +45,5 @@ cp "${MOUNTPOINT}/EFI/systemd/systemd-bootx64.efi" "${MOUNTPOINT}/EFI/BOOT/grubx rm -f "${MOUNTPOINT}"/*/*/{linux,initrd} umount "${MOUNTPOINT}" -rm -rf ${MOUNTPOINT} +rm -rf "${MOUNTPOINT}" rm -rf "${TMPDIR}" diff --git a/image/upload/pack.sh b/image/upload/pack.sh index e6cab6682..05a70dd38 100755 --- a/image/upload/pack.sh +++ b/image/upload/pack.sh @@ -4,6 +4,7 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail +shopt -s inherit_errexit # Show progress on pipes if `pv` is installed # Otherwise use plain cat @@ -24,9 +25,9 @@ pack () { unpacked_image_filename=disk.raw local tmp_tar_file tmp_tar_file=$(mktemp -t verity.XXXXXX.tar) - cp ${unpacked_image} "${unpacked_image_dir}/${unpacked_image_filename}" + cp "${unpacked_image}" "${unpacked_image_dir}/${unpacked_image_filename}" - case $cloudprovider in + case ${cloudprovider} in gcp) echo "📥 Packing GCP image..." @@ -39,7 +40,7 @@ pack () { azure) echo "📥 Packing Azure image..." 
truncate -s %1MiB "${unpacked_image_dir}/${unpacked_image_filename}" - qemu-img convert -p -f raw -O vpc -o force_size,subformat=fixed "${unpacked_image_dir}/${unpacked_image_filename}" "$packed_image" + qemu-img convert -p -f raw -O vpc -o force_size,subformat=fixed "${unpacked_image_dir}/${unpacked_image_filename}" "${packed_image}" echo " Repacked image stored in ${packed_image}" ;; @@ -49,11 +50,11 @@ pack () { ;; esac - rm -r ${unpacked_image_dir} + rm -r "${unpacked_image_dir}" } -if [ $# -ne 3 ]; then +if [[ $# -ne 3 ]]; then echo "Usage: $0 " exit 1 fi diff --git a/image/upload/upload_aws.sh b/image/upload/upload_aws.sh index 789ac13a9..3d74a8e89 100755 --- a/image/upload/upload_aws.sh +++ b/image/upload/upload_aws.sh @@ -4,8 +4,9 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail +shopt -s inherit_errexit -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then # shellcheck source=/dev/null . "${CONFIG_FILE}" fi @@ -108,8 +109,8 @@ create_ami_from_raw_disk() { } }' "${AWS_IMAGE_NAME}" "${AWS_BUCKET}" "${AWS_IMAGE_FILENAME}" > "${CONTAINERS_JSON}" IMPORT_SNAPSHOT=$(aws ec2 import-snapshot --region "${AWS_REGION}" --disk-container "file://${CONTAINERS_JSON}") - echo "$IMPORT_SNAPSHOT" - IMPORT_TASK_ID=$(echo "$IMPORT_SNAPSHOT" | jq -r '.ImportTaskId') + echo "${IMPORT_SNAPSHOT}" + IMPORT_TASK_ID=$(echo "${IMPORT_SNAPSHOT}" | jq -r '.ImportTaskId') aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" wait_for_import "${IMPORT_TASK_ID}" AWS_SNAPSHOT=$(aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId') @@ -125,8 +126,8 @@ create_ami_from_raw_disk() { --block-device-mappings "DeviceName=/dev/xvda,Ebs={SnapshotId=${AWS_SNAPSHOT}}" \ --ena-support \ --tpm-support v2.0 \ - --uefi-data "$(cat "${AWS_EFIVARS_PATH}")") - 
IMAGE_ID=$(echo "$REGISTER_OUT" | jq -r '.ImageId') + --uefi-data "$(cat "${AWS_EFIVARS_PATH}")" \ + ) + IMAGE_ID=$(echo "${REGISTER_OUT}" | jq -r '.ImageId') AMI_FOR_REGION=( ["${AWS_REGION}"]="${IMAGE_ID}") tag_ami_with_backing_snapshot "${IMAGE_ID}" "${AWS_REGION}" make_ami_public "${IMAGE_ID}" "${AWS_REGION}" @@ -142,7 +144,7 @@ replicate_ami() { --source-image-id "${IMAGE_ID}" \ --region "${target_region}") local replicated_image_id - replicated_image_id=$(echo "$replicated_image_out" | jq -r '.ImageId') + replicated_image_id=$(echo "${replicated_image_out}" | jq -r '.ImageId') AMI_FOR_REGION["${target_region}"]=${replicated_image_id} echo "Replicated AMI as ${replicated_image_id} in ${target_region}" } diff --git a/image/upload/upload_azure.sh b/image/upload/upload_azure.sh index 9cd57518e..6bf251ee4 100755 --- a/image/upload/upload_azure.sh +++ b/image/upload/upload_azure.sh @@ -4,8 +4,10 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail +shopt -s inherit_errexit -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then + # shellcheck source=/dev/null . 
"${CONFIG_FILE}" fi @@ -24,7 +26,7 @@ while [[ $# -gt 0 ]]; do shift # past argument shift # past value ;; - -*|--*) + -*) echo "Unknown option $1" exit 1 ;; @@ -52,7 +54,7 @@ fi AZURE_CVM_ENCRYPTION_ARGS="" if [[ -n "${AZURE_SIG_VERSION_ENCRYPTION_TYPE-}" ]]; then AZURE_CVM_ENCRYPTION_ARGS=" --target-region-cvm-encryption " - for region in ${AZURE_REPLICATION_REGIONS}; do + for _ in ${AZURE_REPLICATION_REGIONS}; do AZURE_CVM_ENCRYPTION_ARGS=" ${AZURE_CVM_ENCRYPTION_ARGS} ${AZURE_SIG_VERSION_ENCRYPTION_TYPE}, " done fi @@ -82,17 +84,17 @@ create_disk_with_vmgs () { --security-type "${AZURE_DISK_SECURITY_TYPE}" az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']" - DISK_SAS=$(az disk grant-access -n ${AZURE_DISK_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} \ + DISK_SAS=$(az disk grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \ --access-level Write --duration-in-seconds 86400 \ ${AZURE_VMGS_PATH+"--secure-vm-guest-state-sas"}) azcopy copy "${AZURE_IMAGE_PATH}" \ - "$(echo $DISK_SAS | jq -r .accessSas)" \ + "$(echo "${DISK_SAS}" | jq -r .accessSas)" \ --blob-type PageBlob if [[ -z "${AZURE_VMGS_PATH}" ]]; then echo "No VMGS path provided - skipping VMGS upload" else azcopy copy "${AZURE_VMGS_PATH}" \ - "$(echo $DISK_SAS | jq -r .securityDataAccessSas)" \ + "$(echo "${DISK_SAS}" | jq -r .securityDataAccessSas)" \ --blob-type PageBlob fi az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" @@ -110,10 +112,10 @@ create_disk_without_vmgs () { --upload-type Upload az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']" - DISK_SAS=$(az disk grant-access -n ${AZURE_DISK_NAME} -g ${AZURE_RESOURCE_GROUP_NAME} \ + DISK_SAS=$(az disk 
grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \ --access-level Write --duration-in-seconds 86400) azcopy copy "${AZURE_IMAGE_PATH}" \ - "$(echo $DISK_SAS | jq -r .accessSas)" \ + "$(echo "${DISK_SAS}" | jq -r .accessSas)" \ --blob-type PageBlob az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" } @@ -135,9 +137,9 @@ create_image () { return fi az image create \ - --resource-group ${AZURE_RESOURCE_GROUP_NAME} \ - -l ${AZURE_REGION} \ - -n ${AZURE_DISK_NAME} \ + --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \ + -l "${AZURE_REGION}" \ + -n "${AZURE_DISK_NAME}" \ --hyper-v-generation V2 \ --os-type Linux \ --source "$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)" @@ -152,10 +154,12 @@ delete_image () { create_sig_version () { if [[ -n "${AZURE_VMGS_PATH}" ]]; then - local DISK="$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)" + local DISK + DISK="$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)" local SOURCE="--os-snapshot ${DISK}" else - local IMAGE="$(az image list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)" + local IMAGE + IMAGE="$(az image list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)" local SOURCE="--managed-image ${IMAGE}" fi az sig create -l "${AZURE_REGION}" --gallery-name "${AZURE_GALLERY_NAME}" --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true @@ -177,16 +181,16 @@ create_sig_version () { --gallery-name "${AZURE_GALLERY_NAME}" \ --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \ --gallery-image-version "${AZURE_IMAGE_VERSION}" \ - --target-regions ${AZURE_REPLICATION_REGIONS} \ - 
${AZURE_CVM_ENCRYPTION_ARGS} \ + --target-regions ${AZURE_REPLICATION_REGIONS} \ + ${AZURE_CVM_ENCRYPTION_ARGS} \ --replica-count 1 \ --replication-mode Full \ - ${SOURCE} + ${SOURCE} } create_disk -if [ "$CREATE_SIG_VERSION" = "YES" ]; then +if [[ "${CREATE_SIG_VERSION}" = "YES" ]]; then create_image create_sig_version delete_image diff --git a/image/upload/upload_gcp.sh b/image/upload/upload_gcp.sh index 215d35716..9dd5453be 100755 --- a/image/upload/upload_gcp.sh +++ b/image/upload/upload_gcp.sh @@ -4,8 +4,10 @@ # SPDX-License-Identifier: AGPL-3.0-only set -euo pipefail +shopt -s inherit_errexit -if [ -z "${CONFIG_FILE-}" ] && [ -f "${CONFIG_FILE-}" ]; then +if [[ -n "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then + # shellcheck source=/dev/null . "${CONFIG_FILE}" fi