Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-08-07 22:42:22 -04:00)

ci: format shellscripts

Signed-off-by: Paul Meyer <49727155+katexochen@users.noreply.github.com>

parent fd9dfb500d, commit 106b738fab

29 changed files with 923 additions and 888 deletions
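Every change in this commit is stylistic: trailing backslashes are dropped where a line already ends in a pipe or an opening "$(", "then"/"do" are joined to their test lines, case patterns are spaced as "a | b)" with empty bodies collapsed to ") ;;", redirect and heredoc operators get a following space, and quotes are dropped inside [[ ]] tests. This is the style shfmt produces; the exact invocation is not shown on this page, so the following check is an assumption, not the repository's actual CI step:

  # Assumed shfmt invocation; flags are illustrative, not taken from this repository's CI.
  shfmt -d -s .   # print a diff for scripts that are not canonically formatted
  shfmt -w -s .   # rewrite them in place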
.github/actions/constellation_create/aws-logs.sh (vendored), 56 lines changed

@@ -2,50 +2,50 @@
 # Usage: ./aws-logs.sh <region>

-controlAutoscalingGroup=$(\
-terraform show -json | \
+controlAutoscalingGroup=$(
+terraform show -json |
 jq -r .'values.root_module.child_modules[] |
 select(.address == "module.instance_group_control_plane") |
-.resources[0].values.name' \
+.resources[0].values.name'
 )
-workerAutoscalingGroup=$(\
-terraform show -json | \
+workerAutoscalingGroup=$(
+terraform show -json |
 jq -r .'values.root_module.child_modules[] |
 select(.address == "module.instance_group_worker_nodes") |
-.resources[0].values.name' \
+.resources[0].values.name'
 )

-controlInstances=$(\
+controlInstances=$(
 aws autoscaling describe-auto-scaling-groups \
 --region "${1}" \
 --no-paginate \
 --output json \
---auto-scaling-group-names "${controlAutoscalingGroup}" | \
-jq -r '.AutoScalingGroups[0].Instances[].InstanceId' \
+--auto-scaling-group-names "${controlAutoscalingGroup}" |
+jq -r '.AutoScalingGroups[0].Instances[].InstanceId'
 )
-workerInstances=$(\
+workerInstances=$(
 aws autoscaling describe-auto-scaling-groups \
 --region "${1}" \
 --no-paginate \
 --output json \
---auto-scaling-group-names "${workerAutoscalingGroup}" | \
-jq -r '.AutoScalingGroups[0].Instances[].InstanceId' \
+--auto-scaling-group-names "${workerAutoscalingGroup}" |
+jq -r '.AutoScalingGroups[0].Instances[].InstanceId'
 )

 echo "Fetching logs from control planes: ${controlInstances}"

 for instance in ${controlInstances}; do
 printf "Fetching for %s\n" "${instance}"
-aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | \
-jq -r .'Output' | \
+aws ec2 get-console-output --region "${1}" --instance-id "${instance}" |
+jq -r .'Output' |
 tail -n +2 > control-plane-"${instance}".log
 done

 echo "Fetching logs from worker nodes: ${workerInstances}"

 for instance in ${workerInstances}; do
 printf "Fetching for %s\n" "${instance}"
-aws ec2 get-console-output --region "${1}" --instance-id "${instance}" | \
-jq -r .'Output' | \
+aws ec2 get-console-output --region "${1}" --instance-id "${instance}" |
+jq -r .'Output' |
 tail -n +2 > worker-"${instance}".log
 done
.github/actions/constellation_create/az-logs.sh (vendored), 29 lines changed

@@ -8,17 +8,28 @@ printf "Fetching logs of instances in resource group %s\n" "${1}"
 # get list of all scale sets
 scalesetsjson=$(az vmss list --resource-group "${1}" -o json)
 scalesetslist=$(echo "${scalesetsjson}" | jq -r '.[] | .name')
 subscription=$(az account show | jq -r .id)

 printf "Checking scalesets %s\n" "${scalesetslist}"

 for scaleset in ${scalesetslist}; do
-instanceids=$(az vmss list-instances --resource-group "${1}" --name "${scaleset}" -o json | jq -r '.[] | .instanceId')
+instanceids=$(
+az vmss list-instances \
+--resource-group "${1}" \
+--name "${scaleset}" \
+-o json |
+jq -r '.[] | .instanceId'
+)
 printf "Checking instance IDs %s\n" "${instanceids}"
 for instanceid in ${instanceids}; do
-bloburi=$(az rest --method post --url https://management.azure.com/subscriptions/"${subscription}"/resourceGroups/"${1}"/providers/Microsoft.Compute/virtualMachineScaleSets/"${scaleset}"/virtualmachines/"${instanceid}"/retrieveBootDiagnosticsData?api-version=2022-03-01 | jq '.serialConsoleLogBlobUri' -r)
+bloburi=$(
+az rest \
+--method post \
+--url https://management.azure.com/subscriptions/"${subscription}"/resourceGroups/"${1}"/providers/Microsoft.Compute/virtualMachineScaleSets/"${scaleset}"/virtualmachines/"${instanceid}"/retrieveBootDiagnosticsData?api-version=2022-03-01 |
+jq '.serialConsoleLogBlobUri' -r
+)
 sleep 4
 curl -sL -o "./${scaleset}-${instanceid}.log" "${bloburi}"
 realpath "./${scaleset}-${instanceid}.log"
 done
 done
@@ -3,9 +3,9 @@
 set -euo pipefail
 shopt -s inherit_errexit

-controlInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name' )
+controlInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.base_instance_name')
 workerInstanceGroup=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_worker") | .resources[0].values.base_instance_name')
-zone=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.zone' )
+zone=$(terraform show -json | jq -r .'values.root_module.child_modules[] | select(.address == "module.instance_group_control_plane") | .resources[0].values.zone')

 controlInstanceGroup=${controlInstanceGroup##*/}
 workerInstanceGroupShort=${workerInstanceGroup##*/}
@@ -3,7 +3,7 @@
 set -euo pipefail
 shopt -s inherit_errexit

-SCRIPTDIR="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")"; )";
+SCRIPTDIR="$(dirname -- "$(realpath "${BASH_SOURCE[0]}")")"
 RG=$(jq -r .azureresourcegroup constellation-state.json)
 SUBNET=$(jq -r .azuresubnet constellation-state.json)
 VNET=${SUBNET%"/subnets/nodeNetwork"}

@@ -12,16 +12,22 @@ DEPLOYMENT_NAME=jump-host
 VM_NAME=jump-host

 az deployment group create \
 -o none \
 --name "${DEPLOYMENT_NAME}" \
 --resource-group "${RG}" \
 --template-file "${SCRIPTDIR}/template.json" \
 --parameters "@${SCRIPTDIR}/parameters.json" \
 --parameters "{ \"virtualNetworkId\": { \"value\": \"${VNET}\" } }" \
 --parameters "{ \"subnetRef\": { \"value\": \"${SUBNET}\" } }" \
 --parameters "{ \"adminPublicKey\": { \"value\": \"${PUBKEY}\" } }"
 az deployment group wait --created --name "${DEPLOYMENT_NAME}" --resource-group "${RG}"
-PUBIP=$(az vm list-ip-addresses --resource-group "${RG}" --name "${VM_NAME}" --query "[].virtualMachine.network.publicIpAddresses[0].ipAddress" --output tsv)
+PUBIP=$(
+az vm list-ip-addresses \
+--resource-group "${RG}" \
+--name "${VM_NAME}" \
+--query "[].virtualMachine.network.publicIpAddresses[0].ipAddress" \
+--output tsv
+)
 echo "Jump host created. Cleanup by deleteing the resource group."
 echo "Connect to the jump host with the following command:"
 echo -e "ssh azureuser@${PUBIP}\n"
@@ -13,44 +13,44 @@ not_allowed() {
 go mod download

 go-licenses csv ./... | {
 while read -r line; do

 pkg=${line%%,*}
 lic=${line##*,}

 case ${lic} in
-Apache-2.0|BSD-2-Clause|BSD-3-Clause|ISC|MIT)
-;;
+Apache-2.0 | BSD-2-Clause | BSD-3-Clause | ISC | MIT) ;;

 MPL-2.0)
 case ${pkg} in
-github.com/talos-systems/talos/pkg/machinery/config/encoder)
-;;
-github.com/letsencrypt/boulder)
-;;
-github.com/hashicorp/*)
-;;
+github.com/talos-systems/talos/pkg/machinery/config/encoder) ;;
+github.com/letsencrypt/boulder) ;;
+github.com/hashicorp/*) ;;
 *)
 not_allowed
 ;;
 esac
 ;;

 AGPL-3.0)
 case ${pkg} in
-github.com/edgelesssys/constellation/v2)
-;;
+github.com/edgelesssys/constellation/v2) ;;
 *)
 not_allowed
 ;;
 esac
 ;;

 Unknown)
 case ${pkg} in
 *)
 not_allowed
 ;;
 esac
 ;;

@@ -58,8 +58,8 @@ while read -r line; do
 echo "unknown license: ${line}"
 err=1
 ;;
 esac

 done
 exit "${err}"
 }
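For reference, the allowlist above consumes the CSV that go-licenses prints, one line per dependency with the import path as the first field and the license name as the last. A minimal local sketch of the same pattern, assuming go-licenses is installed (for example via "go install github.com/google/go-licenses@latest") and using a shortened, illustrative allowlist:

  #!/usr/bin/env bash
  # Sketch only: mirrors the structure of the check above with a reduced allowlist.
  set -euo pipefail
  go mod download
  err=0
  go-licenses csv ./... | {
    while read -r line; do
      pkg=${line%%,*}
      lic=${line##*,}
      case ${lic} in
      Apache-2.0 | BSD-2-Clause | BSD-3-Clause | ISC | MIT) ;;
      *)
        echo "unexpected license for ${pkg}: ${lic}"
        err=1
        ;;
      esac
    done
    exit "${err}"
  }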
@@ -3,20 +3,36 @@
 set -euo pipefail
 shopt -s inherit_errexit

-LATEST_AZURE_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test Azure' --json databaseId -q '.[].databaseId')
+LATEST_AZURE_RUNS=$(
+gh run list \
+-R edgelesssys/constellation \
+-w 'e2e Test Azure' \
+--json databaseId \
+-q '.[].databaseId'
+)
 echo "${LATEST_AZURE_RUNS}"
-for RUN_ID in ${LATEST_AZURE_RUNS}
-do
+for RUN_ID in ${LATEST_AZURE_RUNS}; do
 # Might fail, because no state was written, because e2e pipeline failed early
 # Or, because state was downloaded by earlier run of this script
-gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D azure/"${RUN_ID}" || true
+gh run download "${RUN_ID}" \
+-R edgelesssys/constellation \
+-n constellation-state.json \
+-D azure/"${RUN_ID}" || true
 done

-LATEST_GCP_RUNS=$(gh run list -R edgelesssys/constellation -w 'e2e Test GCP' --json databaseId -q '.[].databaseId')
+LATEST_GCP_RUNS=$(
+gh run list \
+-R edgelesssys/constellation \
+-w 'e2e Test GCP' \
+--json databaseId \
+-q '.[].databaseId'
+)
 echo "${LATEST_GCP_RUNS}"
-for RUN_ID in ${LATEST_GCP_RUNS}
-do
+for RUN_ID in ${LATEST_GCP_RUNS}; do
 # Might fail, because no state was written, because e2e pipeline failed early
 # Or, because state was downloaded by earlier run of this script
-gh run download "${RUN_ID}" -R edgelesssys/constellation -n constellation-state.json -D gcp/"${RUN_ID}" || true
+gh run download "${RUN_ID}" \
+-R edgelesssys/constellation \
+-n constellation-state.json \
+-D gcp/"${RUN_ID}" || true
 done
@@ -4,11 +4,10 @@ set -euo pipefail
 shopt -s inherit_errexit

 TO_DELETE=$(grep -lr "\"uid\": \"${1}\"" . || true)
-if [[ -z "${TO_DELETE}" ]]
-then
+if [[ -z ${TO_DELETE} ]]; then
 printf "Unable to find '%s'\n" "${1}"
 else
 printf "Statefile found. You should run:\n\n"
 printf "cd %s\n" "${TO_DELETE}"
 printf "constellation terminate --yes\n\n"
 fi
@@ -18,23 +18,20 @@ set -euo pipefail
 shopt -s inherit_errexit

 # Required tools
-if ! command -v az &> /dev/null
-then
+if ! command -v az &> /dev/null; then
 echo "az CLI could not be found"
 echo "Please instal it from: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli"
 exit
 fi
-if ! command -v azcopy &> /dev/null
-then
+if ! command -v azcopy &> /dev/null; then
 echo "azcopy could not be found"
 echo "Please instal it from: https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-v10"
 exit
 fi
-if ! command -v jq &> /dev/null
-then
+if ! command -v jq &> /dev/null; then
 echo "jq could not be found"
 echo "Please instal it from: https://github.com/stedolan/jq"
 exit
 fi

 AZURE_IMAGE_FILE="${AZURE_IMAGE_FILE:-$(pwd)/abcd}"

@@ -47,17 +44,16 @@ AZURE_IMAGE_DEFINITION="${AZURE_IMAGE_DEFINITION:-constellation}"
 AZURE_SKU="${AZURE_SKU:-constellation}"
 AZURE_SECURITY_TYPE="${AZURE_SECURITY_TYPE:-TrustedLaunch}"

-if [[ -z "${AZURE_RESOURCE_GROUP_NAME}" ]]; then
+if [[ -z ${AZURE_RESOURCE_GROUP_NAME} ]]; then
 echo "Please provide a value for AZURE_RESOURCE_GROUP_NAME."
 exit 1
 fi

-if [[ -z "${AZURE_IMAGE_VERSION}" ]]; then
+if [[ -z ${AZURE_IMAGE_VERSION} ]]; then
 echo "Please provide a value for AZURE_IMAGE_VERSION of pattern <major>.<minor>.<patch>"
 exit 1
 fi

 echo "Using following settings:"
 echo "AZURE_REGION=${AZURE_REGION}"
 echo "AZURE_RESOURCE_GROUP_NAME=${AZURE_RESOURCE_GROUP_NAME}"
@@ -74,9 +70,15 @@ echo ""

 read -r -p "Continue (y/n)?" choice
 case "${choice}" in
-y|Y ) echo "Starting import...";;
-n|N ) echo "Abort!"; exit 1;;
-* ) echo "invalid"; exit 1;;
+y | Y) echo "Starting import..." ;;
+n | N)
+echo "Abort!"
+exit 1
+;;
+*)
+echo "invalid"
+exit 1
+;;
 esac

 echo "Preparing to upload '${AZURE_IMAGE_FILE} to Azure."
@@ -97,20 +99,22 @@ az disk create \
 echo "Waiting for disk to be created."
 az disk wait --created -n "${AZURE_IMAGE_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
 echo "Retrieving disk ID."
-AZURE_DISK_ID=$(az disk list \
+AZURE_DISK_ID=$(
+az disk list \
 --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \
---output json \
-| jq -r \
+--output json |
+jq -r
 )
 echo "Disk ID is ${AZURE_DISK_ID}"

 echo "Generating SAS URL for authorized upload."
-AZURE_SAS_URL=$(az disk grant-access \
+AZURE_SAS_URL=$(
+az disk grant-access \
 -n "${AZURE_IMAGE_NAME}" \
 -g "${AZURE_RESOURCE_GROUP_NAME}" \
 --access-level Write \
---duration-in-seconds 86400 \
-| jq -r .accessSas \
+--duration-in-seconds 86400 |
+jq -r .accessSas
 )
 echo "Uploading image file to Azure disk."
 azcopy copy "${AZURE_IMAGE_FILE}" "${AZURE_SAS_URL}" --blob-type PageBlob
@@ -143,9 +147,10 @@ az sig image-definition create \
 --hyper-v-generation V2 \
 --features SecurityType="${AZURE_SECURITY_TYPE}"
 echo "Retrieving temporary image ID."
-AZURE_IMAGE_ID=$(az image list \
+AZURE_IMAGE_ID=$(
+az image list \
 --query "[?name == '${AZURE_IMAGE_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" \
---output json | jq -r \
+--output json | jq -r
 )

 echo "Creating final image version."

@@ -163,13 +168,14 @@ echo "Cleaning up ephemeral resources."
 az image delete --ids "${AZURE_IMAGE_ID}"
 az disk delete -y --ids "${AZURE_DISK_ID}"

-IMAGE_VERSION=$(az sig image-version show \
+IMAGE_VERSION=$(
+az sig image-version show \
 --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
 --gallery-name "${AZURE_GALLERY_NAME}" \
 --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
 --gallery-image-version "${AZURE_IMAGE_VERSION}" \
 -o tsv \
---query id \
+--query id
 )
 echo "Image ID is ${IMAGE_VERSION}"
@@ -9,24 +9,27 @@ set -euo pipefail
 shopt -s inherit_errexit

 pcr_extend() {
 local CURRENT_PCR="$1"
 local EXTEND_WITH="$2"
 local HASH_FUNCTION="$3"
-( echo -n "${CURRENT_PCR}" | xxd -r -p ; echo -n "${EXTEND_WITH}" | xxd -r -p; ) | ${HASH_FUNCTION} | cut -d " " -f 1
+(
+echo -n "${CURRENT_PCR}" | xxd -r -p
+echo -n "${EXTEND_WITH}" | xxd -r -p
+) | ${HASH_FUNCTION} | cut -d " " -f 1
 }

-extract () {
+extract() {
 local image="$1"
 local path="$2"
 local output="$3"
 sudo systemd-dissect --copy-from "${image}" "${path}" "${output}"
 }

-mktempdir () {
+mktempdir() {
 mktemp -d
 }

-cleanup () {
+cleanup() {
 local dir="$1"
 rm -rf "${dir}"
 }
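The pcr_extend helper above mirrors how a TPM extends a PCR: the new value is the hash of the previous value concatenated with the new measurement, both treated as raw bytes. The measurement scripts later in this commit chain it starting from an all-zero register; a short usage sketch with placeholder digests:

  # stage1_digest and stage2_digest are placeholders for real SHA-256 measurements.
  expected_pcr=0000000000000000000000000000000000000000000000000000000000000000
  expected_pcr=$(pcr_extend "${expected_pcr}" "${stage1_digest}" "sha256sum")
  expected_pcr=$(pcr_extend "${expected_pcr}" "${stage2_digest}" "sha256sum")
  echo "Expected PCR value: ${expected_pcr}"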
@@ -13,14 +13,14 @@ source "$(dirname "$0")/measure_util.sh"
 ev_efi_action_sha256=3d6772b4f84ed47595d72a2c4c5ffd15f5bb72c7507fe26f2aaee2c69d5633ba
 ev_efi_separator_sha256=df3f619804a92fdb4057192dc43dd748ea778adc52bc498ce80524c014b81119

-authentihash () {
+authentihash() {
 local path="$1"
 "$(dirname "$0")/extract_authentihash.py" "${path}"
 }

-write_output () {
+write_output() {
 local out="$1"
-cat > "${out}" <<EOF
+cat > "${out}" << EOF
 {
 "pcr4": "${expected_pcr_4}",
 "efistages": [

@@ -63,9 +63,9 @@ expected_pcr_4=$(pcr_extend "${expected_pcr_4}" "${sd_boot_authentihash}" "sha25
 expected_pcr_4=$(pcr_extend "${expected_pcr_4}" "${uki_authentihash}" "sha256sum")

 echo "Authentihashes:"
-echo "Stage 1 – shim: ${shim_authentihash}"
-echo "Stage 2 – sd-boot: ${sd_boot_authentihash}"
-echo "Stage 3 – Unified Kernel Image (UKI): ${uki_authentihash}"
+echo "Stage 1 - shim: ${shim_authentihash}"
+echo "Stage 2 - sd-boot: ${sd_boot_authentihash}"
+echo "Stage 3 - Unified Kernel Image (UKI): ${uki_authentihash}"
 echo ""
 echo "Expected PCR[4]: ${expected_pcr_4}"
 echo ""
@@ -13,26 +13,26 @@ set -euo pipefail
 shopt -s inherit_errexit
 source "$(dirname "$0")/measure_util.sh"

-get_cmdline_from_uki () {
+get_cmdline_from_uki() {
 local uki="$1"
 local output="$2"
 objcopy -O binary --only-section=.cmdline "${uki}" "${output}"
 }

-cmdline_measure () {
+cmdline_measure() {
 local path="$1"
 local tmp
 tmp=$(mktemp)
 # convert to utf-16le and add a null terminator
 iconv -f utf-8 -t utf-16le "${path}" -o "${tmp}"
 truncate -s +2 "${tmp}"
 sha256sum "${tmp}" | cut -d " " -f 1
 rm "${tmp}"
 }

-write_output () {
+write_output() {
 local out="$1"
-cat > "${out}" <<EOF
+cat > "${out}" << EOF
 {
 "pcr8": "${expected_pcr_8}",
 "cmdline": "${cmdline}"

@@ -58,10 +58,10 @@ cleanup "${DIR}"

 expected_pcr_8=0000000000000000000000000000000000000000000000000000000000000000
 expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum")
-if [[ "${CSP}" == "azure" ]]; then
+if [[ ${CSP} == "azure" ]]; then
 # Azure displays the boot menu
 # triggering an extra measurement of the kernel command line.
 expected_pcr_8=$(pcr_extend "${expected_pcr_8}" "${cmdline_hash}" "sha256sum")
 fi

 echo "Kernel commandline: ${cmdline}"
@@ -12,21 +12,20 @@ shopt -s inherit_errexit

 source "$(dirname "$0")/measure_util.sh"

-get_initrd_from_uki () {
+get_initrd_from_uki() {
 local uki="$1"
 local output="$2"
 objcopy -O binary --only-section=.initrd "${uki}" "${output}"
 }

-initrd_measure () {
+initrd_measure() {
 local path="$1"
 sha256sum "${path}" | cut -d " " -f 1
 }

-write_output () {
+write_output() {
 local out="$1"
-cat > "${out}" <<EOF
+cat > "${out}" << EOF
 {
 "pcr9": "${expected_pcr_9}",
 "initrd": "${initrd_hash}"
@@ -5,33 +5,31 @@ set -euo pipefail
 shopt -s inherit_errexit

 attempts=1
-until [[ "${attempts}" -gt 5 ]]
-do
+until [[ ${attempts} -gt 5 ]]; do
 echo "obtaining goal state - attempt ${attempts}"
 goalstate=$(curl --fail -v -X 'GET' -H "x-ms-agent-name: azure-vm-register" \
 -H "Content-Type: text/xml;charset=utf-8" \
 -H "x-ms-version: 2012-11-30" \
 "http://168.63.129.16/machine/?comp=goalstate")
-if [[ $? -eq 0 ]]
-then
+if [[ $? -eq 0 ]]; then
 echo "successfully retrieved goal state"
 retrieved_goal_state=true
 break
 fi
 sleep 5
-attempts=$((attempts+1))
+attempts=$((attempts + 1))
 done

-if [[ "${retrieved_goal_state}" != "true" ]]
-then
+if [[ ${retrieved_goal_state} != "true" ]]; then
 echo "failed to obtain goal state - cannot register this VM"
 exit 1
 fi

 container_id=$(grep ContainerId <<< "${goalstate}" | sed 's/\s*<\/*ContainerId>//g' | sed 's/\r$//')
 instance_id=$(grep InstanceId <<< "${goalstate}" | sed 's/\s*<\/*InstanceId>//g' | sed 's/\r$//')

-ready_doc=$(cat << EOF
+ready_doc=$(
+cat << EOF
 <?xml version="1.0" encoding="utf-8"?>
 <Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
 <GoalStateIncarnation>1</GoalStateIncarnation>

@@ -51,18 +49,16 @@ EOF
 )

 attempts=1
-until [[ "${attempts}" -gt 5 ]]
-do
+until [[ ${attempts} -gt 5 ]]; do
 echo "registering with Azure - attempt ${attempts}"
 curl --fail -v -X 'POST' -H "x-ms-agent-name: azure-vm-register" \
 -H "Content-Type: text/xml;charset=utf-8" \
 -H "x-ms-version: 2012-11-30" \
 -d "${ready_doc}" \
 "http://168.63.129.16/machine?comp=health"
-if [[ $? -eq 0 ]]
-then
+if [[ $? -eq 0 ]]; then
 echo "successfully register with Azure"
 break
 fi
 sleep 5 # sleep to prevent throttling from wire server
 done
@@ -7,27 +7,29 @@ set -euo pipefail
 shopt -s inherit_errexit

 depends() {
 echo systemd
 }

 install_and_enable_unit() {
-unit="$1"; shift
-target="$1"; shift
+unit="$1"
+shift
+target="$1"
+shift
 inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}"
 mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants"
 ln_r "${systemdsystemunitdir}/${unit}" \
 "${systemdsystemconfdir}/${target}.wants/${unit}"
 }

 install() {
 inst_multiple \
 bash \
 curl \
 grep \
 sed

 inst_script "${moddir}/azure-provisioning.sh" \
 "/usr/local/bin/azure-provisioning"
 install_and_enable_unit "azure-provisioning.service" \
 "basic.target"
 }
@@ -10,20 +10,18 @@ AWS_STATE_DISK_DEVICENAME="sdb"
 AWS_STATE_DISK_SYMLINK="/dev/${AWS_STATE_DISK_DEVICENAME}"

 # hack: aws nvme udev rules are never executed. Create symlinks for the nvme devices manually.
-while [[ ! -L "${AWS_STATE_DISK_SYMLINK}" ]]
-do
-for nvmedisk in /dev/nvme*n1
-do
+while [[ ! -L ${AWS_STATE_DISK_SYMLINK} ]]; do
+for nvmedisk in /dev/nvme*n1; do
 linkname=$(nvme amzn id-ctrl -b "${nvmedisk}" | tail -c +3072 | tr -d ' ') || true
-if [[ -n "${linkname}" ]] && [[ "${linkname}" == "${AWS_STATE_DISK_DEVICENAME}" ]]; then
+if [[ -n ${linkname} ]] && [[ ${linkname} == "${AWS_STATE_DISK_DEVICENAME}" ]]; then
 ln -s "${nvmedisk}" "${AWS_STATE_DISK_SYMLINK}"
 fi
 done
-if [[ -L "${AWS_STATE_DISK_SYMLINK}" ]]; then
+if [[ -L ${AWS_STATE_DISK_SYMLINK} ]]; then
 break
 fi
 echo "Waiting for state disk to appear.."
 sleep 2
 done

 echo "AWS state disk found"
@@ -4,73 +4,76 @@
 # SPDX-License-Identifier: AGPL-3.0-only

 depends() {
 # expands to: systemd systemd-hostnamed systemd-networkd systemd-resolved systemd-timedated systemd-timesyncd
 echo systemd-network-management
 }

 install_and_enable_unit() {
-unit="$1"; shift
-target="$1"; shift
+unit="$1"
+shift
+target="$1"
+shift
 inst_simple "${moddir:?}/${unit}" "${systemdsystemunitdir:?}/${unit}"
 mkdir -p "${initdir:?}${systemdsystemconfdir:?}/${target}.wants"
 ln_r "${systemdsystemunitdir}/${unit}" \
 "${systemdsystemconfdir}/${target}.wants/${unit}"
 }

 install_path() {
-local dir="$1"; shift
+local dir="$1"
+shift
 mkdir -p "${initdir}/${dir}"
 }

 install() {
 inst_multiple \
 bash
 inst_script "/usr/sbin/disk-mapper" \
 "/usr/sbin/disk-mapper"

 inst_script "${moddir}/prepare-state-disk.sh" \
 "/usr/sbin/prepare-state-disk"
 install_and_enable_unit "prepare-state-disk.service" \
 "basic.target"
 install_and_enable_unit "configure-constel-csp.service" \
 "basic.target"

 # aws nvme disks
 inst_multiple \
 tail \
 tr

 # azure scsi disks
 inst_multiple \
 cut \
 readlink

 # gcp nvme disks
 inst_multiple \
 date \
 xxd \
 grep \
 sed \
 ln \
 command \
 readlink

 inst_script "/usr/sbin/nvme" \
 "/usr/sbin/nvme"
 inst_script "/usr/lib/udev/google_nvme_id" \
 "/usr/lib/udev/google_nvme_id"
 inst_rules "64-gce-disk-removal.rules" "65-gce-disk-naming.rules"

 inst_script "${moddir}/aws-nvme-disk.sh" \
 "/usr/sbin/aws-nvme-disk"
 install_and_enable_unit "aws-nvme-disk.service" \
 "basic.target"

 # TLS / CA store in initramfs
 install_path /etc/pki/tls/certs/
 inst_simple /etc/pki/tls/certs/ca-bundle.crt \
 /etc/pki/tls/certs/ca-bundle.crt

 # backport of https://github.com/dracutdevs/dracut/commit/dcbe23c14d13ca335ad327b7bb985071ca442f12
 inst_simple "${moddir}/sysusers-dracut.conf" "${systemdsystemunitdir}/systemd-sysusers.service.d/sysusers-dracut.conf"
 }
|
||||||
# Store encryption key (random or recovered key) in /run/cryptsetup-keys.d/state.key
|
# Store encryption key (random or recovered key) in /run/cryptsetup-keys.d/state.key
|
||||||
disk-mapper -csp "${CONSTEL_CSP}"
|
disk-mapper -csp "${CONSTEL_CSP}"
|
||||||
if [[ $? -ne 0 ]]; then
|
if [[ $? -ne 0 ]]; then
|
||||||
echo "Failed to prepare state disk"
|
echo "Failed to prepare state disk"
|
||||||
sleep 2 # give the serial console time to print the error message
|
sleep 2 # give the serial console time to print the error message
|
||||||
exit $? # exit with the same error code as disk-mapper
|
exit $? # exit with the same error code as disk-mapper
|
||||||
fi
|
fi
|
||||||
|
|
|
@@ -59,7 +59,7 @@ function get_namespace_device_name() {
 return 1
 fi

-if [[ -z "${nvme_json}" ]]; then
+if [[ -z ${nvme_json} ]]; then
 err "NVMe Vendor Extension disk information not present"
 return 1
 fi

@@ -68,7 +68,7 @@ function get_namespace_device_name() {
 device_name="$(echo "${nvme_json}" | grep device_name | sed -e 's/.*"device_name":[ \t]*"\([a-zA-Z0-9_-]\+\)".*/\1/')"

 # Error if our device name is empty
-if [[ -z "${device_name}" ]]; then
+if [[ -z ${device_name} ]]; then
 err "Empty name"
 return 1
 fi

@@ -91,7 +91,7 @@ function get_namespace_device_name() {
 function get_namespace_number() {
 local dev_path="$1"
 local namespace_number
-if [[ "${dev_path}" =~ ${NAMESPACE_NUMBER_REGEX} ]]; then
+if [[ ${dev_path} =~ ${NAMESPACE_NUMBER_REGEX} ]]; then
 namespace_number="${BASH_REMATCH[1]}"
 else
 return 1

@@ -114,7 +114,7 @@ function get_namespace_number() {
 function get_partition_number() {
 local dev_path="$1"
 local partition_number
-if [[ "${dev_path}" =~ ${PARTITION_NUMBER_REGEX} ]]; then
+if [[ ${dev_path} =~ ${PARTITION_NUMBER_REGEX} ]]; then
 partition_number="${BASH_REMATCH[1]}"
 echo "${partition_number}"
 else

@@ -136,7 +136,7 @@ function gen_symlink() {
 local partition_number
 partition_number="$(get_partition_number "${dev_path}")"

-if [[ -n "${partition_number}" ]]; then
+if [[ -n ${partition_number} ]]; then
 ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}"-part"${partition_number}" > /dev/null 2>&1
 else
 ln -s "${dev_path}" /dev/disk/by-id/google-"${ID_SERIAL_SHORT}" > /dev/null 2>&1

@@ -182,19 +182,21 @@ function main() {

 while getopts :d:sh flag; do
 case "${flag}" in
-d) device_path="${OPTARG}";;
-s) opt_gen_symlink='true';;
-h) print_help_message
-return 0
-;;
-:) echo "Invalid option: ${OPTARG} requires an argument" 1>&2
-return 1
-;;
-*) return 1
+d) device_path="${OPTARG}" ;;
+s) opt_gen_symlink='true' ;;
+h)
+print_help_message
+return 0
+;;
+:)
+echo "Invalid option: ${OPTARG} requires an argument" 1>&2
+return 1
+;;
+*) return 1 ;;
 esac
 done

-if [[ -z "${device_path}" ]]; then
+if [[ -z ${device_path} ]]; then
 echo "Device path (-d) argument required. Use -h for full usage." 1>&2
 exit 1
 fi

@@ -208,7 +210,7 @@ with sudo or install nvme-cli."
 fi

 # Ensure the passed device is actually an NVMe device
-"${nvme_cli_bin}" id-ctrl "${device_path}" &>/dev/null
+"${nvme_cli_bin}" id-ctrl "${device_path}" &> /dev/null
 if [[ $? -ne 0 ]]; then
 err "Passed device was not an NVMe device. (You may need to run this \
 script as root/with sudo)."

@@ -218,7 +220,7 @@ script as root/with sudo)."
 # Detect the type of attached nvme device
 local controller_id
 controller_id=$("${nvme_cli_bin}" id-ctrl "${device_path}")
-if [[ ! "${controller_id}" =~ nvme_card-pd ]] ; then
+if [[ ! ${controller_id} =~ nvme_card-pd ]]; then
 err "Device is not a PD-NVMe device"
 return 1
 fi

@@ -231,7 +233,7 @@ script as root/with sudo)."
 fi

 # Gen symlinks or print out the globals set by the identify command
-if [[ "${opt_gen_symlink}" == 'true' ]]; then
+if [[ ${opt_gen_symlink} == 'true' ]]; then
 gen_symlink "${device_path}"
 else
 # These will be consumed by udev
@@ -7,8 +7,8 @@
 # and prints the message to the serial console

 main() {
 pcr_state="$(tpm2_pcrread sha256)"
 echo -e "PCR state:\n${pcr_state}\n" > /run/issue.d/35_constellation_pcrs.issue
 }

 main
@@ -3,27 +3,27 @@
 set -euo pipefail
 shopt -s inherit_errexit

-if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then
+if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
 # shellcheck source=/dev/null
 . "${CONFIG_FILE}"
 fi
 POSITIONAL_ARGS=()

 while [[ $# -gt 0 ]]; do
 case $1 in
--n|--name)
+-n | --name)
 AZURE_VM_NAME="$2"
 shift # past argument
 shift # past value
 ;;
 -*)
 echo "Unknown option $1"
 exit 1
 ;;
 *)
 POSITIONAL_ARGS+=("$1") # save positional arg
 shift # past argument
 ;;
 esac
 done

@@ -38,34 +38,33 @@ SUBNET=$(echo "${NIC_INFO}" | jq -r '.ipConfigurations[0].subnet.id')
 VNET=${SUBNET//\/subnets\/.*/}
 DISK=$(echo "${AZ_VM_INFO}" | jq -r '.storageProfile.osDisk.managedDisk.id')

-delete_vm () {
+delete_vm() {
 az vm delete -y --name "${AZURE_VM_NAME}" \
 --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true
 }

-delete_vnet () {
+delete_vnet() {
 az network vnet delete --ids "${VNET}" || true
 }

-delete_subnet () {
+delete_subnet() {
 az network vnet subnet delete --ids "${SUBNET}" || true
 }

-delete_nsg () {
+delete_nsg() {
 az network nsg delete --ids "${NSG}" || true
 }

-delete_pubip () {
+delete_pubip() {
 az network public-ip delete --ids "${PUBIP}" || true
 }

-delete_disk () {
+delete_disk() {
 az disk delete -y --ids "${DISK}" || true
 }

-delete_nic () {
+delete_nic() {
 az network nic delete --ids "${NIC}" || true
 }

 delete_vm
@@ -3,28 +3,28 @@
 set -euo pipefail
 shopt -s inherit_errexit

-if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then
+if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
 # shellcheck source=/dev/null
 . "${CONFIG_FILE}"
 fi
 AZURE_SUBSCRIPTION=$(az account show --query id -o tsv)
 POSITIONAL_ARGS=()

 while [[ $# -gt 0 ]]; do
 case $1 in
--n|--name)
+-n | --name)
 AZURE_VM_NAME="$2"
 shift # past argument
 shift # past value
 ;;
 -*)
 echo "Unknown option $1"
 exit 1
 ;;
 *)
 POSITIONAL_ARGS+=("$1") # save positional arg
 shift # past argument
 ;;
 esac
 done

@@ -34,10 +34,10 @@ VM_DISK=$(az vm show -g "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_VM_NAME}"
 LOCATION=$(az disk show --ids "${VM_DISK}" --query "location" -o tsv)

 az snapshot create \
 -g "${AZURE_RESOURCE_GROUP_NAME}" \
 --source "${VM_DISK}" \
 --name "${AZURE_SNAPSHOT_NAME}" \
 -l "${LOCATION}"

 # Azure CLI does not implement getSecureVMGuestStateSAS for snapshots yet
 # az snapshot grant-access \

@@ -47,11 +47,11 @@ az snapshot create \
 # -g "${AZURE_RESOURCE_GROUP_NAME}"

 BEGIN=$(az rest \
 --method post \
 --url "https://management.azure.com/subscriptions/${AZURE_SUBSCRIPTION}/resourceGroups/${AZURE_RESOURCE_GROUP_NAME}/providers/Microsoft.Compute/snapshots/${AZURE_SNAPSHOT_NAME}/beginGetAccess" \
 --uri-parameters api-version="2021-12-01" \
 --body '{"access": "Read", "durationInSeconds": 3600, "getSecureVMGuestStateSAS": true}' \
 --verbose 2>&1)
 ASYNC_OPERATION_URI=$(echo "${BEGIN}" | grep Azure-AsyncOperation | cut -d ' ' -f 7 | tr -d "'")
 sleep 10
 ACCESS=$(az rest --method get --url "${ASYNC_OPERATION_URI}")

@@ -60,9 +60,9 @@ VMGS_URL=$(echo "${ACCESS}" | jq -r '.properties.output.securityDataAccessSAS')
 curl -L -o "${AZURE_VMGS_FILENAME}" "${VMGS_URL}"

 az snapshot revoke-access \
 --name "${AZURE_SNAPSHOT_NAME}" \
 -g "${AZURE_RESOURCE_GROUP_NAME}"
 az snapshot delete \
 --name "${AZURE_SNAPSHOT_NAME}" \
 -g "${AZURE_RESOURCE_GROUP_NAME}"
 echo "VMGS saved to ${AZURE_VMGS_FILENAME}"
@ -3,101 +3,101 @@
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
shopt -s inherit_errexit
|
shopt -s inherit_errexit
|
||||||
|
|
||||||
if [[ -z "${CONFIG_FILE-}" ]] && [[ -f "${CONFIG_FILE-}" ]]; then
|
if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
|
||||||
# shellcheck source=/dev/null
|
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi

POSITIONAL_ARGS=()

while [[ $# -gt 0 ]]; do
  case $1 in
  -n | --name)
    AZURE_VM_NAME="$2"
    shift # past argument
    shift # past value
    ;;
  -g | --gallery)
    CREATE_FROM_GALLERY=YES
    shift # past argument
    ;;
  -d | --disk)
    CREATE_FROM_GALLERY=NO
    shift # past argument
    ;;
  --secure-boot)
    AZURE_SECURE_BOOT="$2"
    shift # past argument
    shift # past value
    ;;
  --disk-name)
    AZURE_DISK_NAME="$2"
    shift # past argument
    shift # past value
    ;;
  -*)
    echo "Unknown option $1"
    exit 1
    ;;
  *)
    POSITIONAL_ARGS+=("$1") # save positional arg
    shift # past argument
    ;;
  esac
done

set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters

if [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then
  VMSIZE="Standard_DC2as_v5"
elif [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then
  VMSIZE="standard_D2as_v5"
else
  echo "Unknown security type: ${AZURE_SECURITY_TYPE}"
  exit 1
fi

create_vm_from_disk() {
  AZURE_DISK_REFERENCE=$(az disk show --resource-group "${AZURE_RESOURCE_GROUP_NAME}" --name "${AZURE_DISK_NAME}" --query id -o tsv)
  az vm create --name "${AZURE_VM_NAME}" \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --size "${VMSIZE}" \
    --public-ip-sku Standard \
    --os-type Linux \
    --attach-os-disk "${AZURE_DISK_REFERENCE}" \
    --security-type "${AZURE_SECURITY_TYPE}" \
    --os-disk-security-encryption-type VMGuestStateOnly \
    --enable-vtpm true \
    --enable-secure-boot "${AZURE_SECURE_BOOT}" \
    --boot-diagnostics-storage "" \
    --no-wait
}

create_vm_from_sig() {
  AZURE_IMAGE_REFERENCE=$(az sig image-version show \
    --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
    --gallery-image-version "${AZURE_IMAGE_VERSION}" \
    --gallery-name "${AZURE_GALLERY_NAME}" \
    -g "${AZURE_RESOURCE_GROUP_NAME}" \
    --query id -o tsv)
  az vm create --name "${AZURE_VM_NAME}" \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --size "${VMSIZE}" \
    --public-ip-sku Standard \
    --image "${AZURE_IMAGE_REFERENCE}" \
    --security-type "${AZURE_SECURITY_TYPE}" \
    --os-disk-security-encryption-type VMGuestStateOnly \
    --enable-vtpm true \
    --enable-secure-boot "${AZURE_SECURE_BOOT}" \
    --boot-diagnostics-storage "" \
    --no-wait
}

if [[ ${CREATE_FROM_GALLERY} == "YES" ]]; then
  create_vm_from_sig
else
  create_vm_from_disk
fi

sleep 30
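For orientation, a minimal invocation sketch of the script above. The file name create-vm.sh and the config path are assumptions, not part of this diff; settings such as AZURE_RESOURCE_GROUP_NAME, AZURE_REGION and AZURE_SECURITY_TYPE are expected to come from the sourced CONFIG_FILE.

  # hypothetical invocation: boot a VM from the gallery image with Secure Boot enabled
  CONFIG_FILE=./config.sh ./create-vm.sh --gallery --name constell-test-vm --secure-boot true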
@@ -6,90 +6,90 @@
set -euo pipefail
shopt -s inherit_errexit

SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
BASE_DIR=$(realpath "${SCRIPT_DIR}/..")

# Set to qemu+tcp://localhost:16599/system for dockerized libvirt setup
if [[ -z ${LIBVIRT_SOCK} ]]; then
  LIBVIRT_SOCK=qemu:///system
fi

libvirt_nvram_gen() {
  local image_path="${1}"
  if test -f "${BASE_DIR}/image.nvram.template"; then
    echo "NVRAM template already generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)"
    return
  fi
  if ! test -f "${image_path}"; then
    echo "Image \"${image_path}\" does not exist yet. To generate nvram, create disk image first."
    return
  fi

  OVMF_CODE=/usr/share/OVMF/OVMF_CODE_4M.ms.fd
  OVMF_VARS=/usr/share/OVMF/OVMF_VARS_4M.ms.fd
  if ! test -f "${OVMF_CODE}"; then
    OVMF_CODE=/usr/share/OVMF/OVMF_CODE.secboot.fd
  fi
  if ! test -f "${OVMF_VARS}"; then
    OVMF_VARS=/usr/share/OVMF/OVMF_VARS.secboot.fd
  fi

  echo "Using OVMF_CODE: ${OVMF_CODE}"
  echo "Using OVMF_VARS: ${OVMF_VARS}"

  # generate nvram file using libvirt
  virt-install --name constell-nvram-gen \
    --connect "${LIBVIRT_SOCK}" \
    --nonetworks \
    --description 'Constellation' \
    --ram 1024 \
    --vcpus 1 \
    --osinfo detect=on,require=off \
    --disk "${image_path},format=raw" \
    --boot "machine=q35,menu=on,loader=${OVMF_CODE},loader.readonly=yes,loader.type=pflash,nvram.template=${OVMF_VARS},nvram=${BASE_DIR}/image.nvram,loader_secure=yes" \
    --features smm.state=on \
    --noautoconsole
  echo -e 'connect using'
  echo -e ' \u001b[1mvirsh console constell-nvram-gen\u001b[0m'
  echo -e ''
  echo -e 'Load db cert with MokManager or enroll full PKI with firmware setup'
  echo -e ''
  echo -e ' \u001b[1mMokManager\u001b[0m'
  echo -e ' For mokmanager, try to boot as usual. You will see this message:'
  echo -e ' > "Verification failed: (0x1A) Security Violation"'
  echo -e ' Press OK, then ENTER, then "Enroll key from disk"'
  echo -e ' Select the following key:'
  echo -e ' > \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m'
  echo -e ' Press Continue, then choose "Yes" to the question "Enroll the key(s)?"'
  echo -e ' Choose reboot and continue this script.'
  echo -e ''
  echo -e ' \u001b[1mFirmware setup\u001b[0m'
  echo -e ' For firmware setup, press F2.'
  echo -e ' Go to "Device Manager">"Secure Boot Configuration">"Secure Boot Mode"'
  echo -e ' Choose "Custom Mode"'
  echo -e ' Go to "Custom Securee Boot Options"'
  echo -e ' Go to "PK Options">"Enroll PK", Press "Y" if queried, "Enroll PK using File"'
  echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/PK.cer\u001b[0m'
  echo -e ' Choose "Commit Changes and Exit"'
  echo -e ' Go to "KEK Options">"Enroll KEK", Press "Y" if queried, "Enroll KEK using File"'
  echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/KEK.cer\u001b[0m'
  echo -e ' Choose "Commit Changes and Exit"'
  echo -e ' Go to "DB Options">"Enroll Signature">"Enroll Signature using File"'
  echo -e ' Select the following cert: \u001b[1m/EFI/loader/keys/auto/db.cer\u001b[0m'
  echo -e ' Choose "Commit Changes and Exit"'
  echo -e ' Repeat the last step for the following certs:'
  echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicWinProPCA2011_2011-10-19.crt\u001b[0m'
  echo -e ' > \u001b[1m/EFI/loader/keys/auto/MicCorUEFCA2011_2011-06-27.crt\u001b[0m'
  echo -e ' Reboot and continue this script.'
  echo -e ''
  echo -e 'Press ENTER to continue after you followed one of the guides from above.'
  read -r
  sudo cp "${BASE_DIR}/image.nvram" "${BASE_DIR}/image.nvram.template"
  virsh --connect "${LIBVIRT_SOCK}" destroy --domain constell-nvram-gen
  virsh --connect "${LIBVIRT_SOCK}" undefine --nvram constell-nvram-gen
  rm -f "${BASE_DIR}/image.nvram"

  echo "NVRAM template generated: $(realpath "--relative-to=$(pwd)" "${BASE_DIR}"/image.nvram.template)"
}

libvirt_nvram_gen "$1"
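A hedged usage sketch for the NVRAM generator above (the script name is an assumption): point it at a built raw image, perform the key enrollment in the VM console, then return to the script and press ENTER so the NVRAM file is saved as the template.

  ./generate-nvram.sh image.raw
  # in a second terminal, follow the MokManager or firmware-setup guide printed by the script
  virsh console constell-nvram-gen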
@@ -12,58 +12,57 @@
set -euo pipefail
shopt -s inherit_errexit

SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd)
TEMPLATES=${SCRIPT_DIR}/templates
BASE_DIR=$(realpath "${SCRIPT_DIR}/..")
if [[ -z ${PKI} ]]; then
  PKI=${BASE_DIR}/pki
fi
if [[ -z ${PKI_SET} ]]; then
  PKI_SET=dev
fi

gen_pki() {
  # Only use for non-production images.
  # Use real PKI for production images instead.
  count=$(find "${PKI}" -maxdepth 1 \( -name '*.key' -o -name '*.crt' -o -name '*.cer' -o -name '*.esl' -o -name '*.auth' \) 2> /dev/null | wc -l)
  if [[ ${count} != 0 ]]; then
    echo PKI files "$(ls -1 "$(realpath "--relative-to=$(pwd)" "${PKI}")"/*.{key,crt,cer,esl,auth})" already exist
    return
  fi
  mkdir -p "${PKI}"
  pushd "${PKI}" || exit 1

  uuid=$(systemd-id128 new --uuid)
  for key in PK KEK db; do
    openssl req -new -x509 -config "${TEMPLATES}/${PKI_SET}_${key}.conf" -keyout "${key}.key" -out "${key}.crt" -nodes
    openssl x509 -outform DER -in "${key}.crt" -out "${key}.cer"
    cert-to-efi-sig-list -g "${uuid}" "${key}.crt" "${key}.esl"
  done

  for key in MicWinProPCA2011_2011-10-19.crt MicCorUEFCA2011_2011-06-27.crt MicCorKEKCA2011_2011-06-24.crt; do
    curl -sL "https://www.microsoft.com/pkiops/certs/${key}" --output "${key}"
    sbsiglist --owner 77fa9abd-0359-4d32-bd60-28f4e78f784b --type x509 --output "${key%crt}esl" "${key}"
  done

  # Optionally add Microsoft Windows Production CA 2011 (needed to boot into Windows).
  cat MicWinProPCA2011_2011-10-19.esl >> db.esl

  # Optionally add Microsoft Corporation UEFI CA 2011 (for firmware drivers / option ROMs
  # and third-party boot loaders (including shim). This is highly recommended on real
  # hardware as not including this may soft-brick your device (see next paragraph).
  cat MicCorUEFCA2011_2011-06-27.esl >> db.esl

  # Optionally add Microsoft Corporation KEK CA 2011. Recommended if either of the
  # Microsoft keys is used as the official UEFI revocation database is signed with this
  # key. The revocation database can be updated with [fwupdmgr(1)](https://www.freedesktop.org/software/systemd/man/fwupdmgr.html#).
  cat MicCorKEKCA2011_2011-06-24.esl >> KEK.esl

  sign-efi-sig-list -c PK.crt -k PK.key PK PK.esl PK.auth
  sign-efi-sig-list -c PK.crt -k PK.key KEK KEK.esl KEK.auth
  sign-efi-sig-list -c KEK.crt -k KEK.key db db.esl db.auth

  popd || exit 1
}

# gen_pki generates a PKI for testing purposes only.
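The generated test PKI can be sanity-checked with standard tools; a small sketch, assuming the default ${BASE_DIR}/pki output directory:

  cd pki
  openssl x509 -in PK.crt -noout -subject -enddate   # inspect the self-signed platform key
  sig-list-to-certs db.esl db                        # efitools: dump the certificates contained in db.esl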
@@ -8,10 +8,9 @@
set -euo pipefail
shopt -s inherit_errexit

if (($# != 1)); then
  echo "Usage: $0 <image.raw>"
  exit 1
fi

# SOURCE is the URL used to download the signed shim RPM
@@ -8,55 +8,54 @@ shopt -s inherit_errexit

# Show progress on pipes if `pv` is installed
# Otherwise use plain cat
if ! command -v pv &> /dev/null; then
  PV="cat"
else
  PV="pv"
fi

pack() {
  local cloudprovider=$1
  local unpacked_image=$2
  local packed_image=$3
  local unpacked_image_dir
  unpacked_image_dir=$(mktemp -d)
  local unpacked_image_filename
  unpacked_image_filename=disk.raw
  local tmp_tar_file
  tmp_tar_file=$(mktemp -t verity.XXXXXX.tar)
  cp "${unpacked_image}" "${unpacked_image_dir}/${unpacked_image_filename}"

  case ${cloudprovider} in

  gcp)
    echo "📥 Packing GCP image..."
    tar --owner=0 --group=0 -C "${unpacked_image_dir}" -Sch --format=oldgnu -f "${tmp_tar_file}" "${unpacked_image_filename}"
    "${PV}" "${tmp_tar_file}" | pigz -9c > "${packed_image}"
    rm "${tmp_tar_file}"
    echo "  Repacked image stored in ${packed_image}"
    ;;

  azure)
    echo "📥 Packing Azure image..."
    truncate -s %1MiB "${unpacked_image_dir}/${unpacked_image_filename}"
    qemu-img convert -p -f raw -O vpc -o force_size,subformat=fixed "${unpacked_image_dir}/${unpacked_image_filename}" "${packed_image}"
    echo "  Repacked image stored in ${packed_image}"
    ;;

  *)
    echo "unknown cloud provider"
    exit 1
    ;;
  esac

  rm -r "${unpacked_image_dir}"

}

if [[ $# -ne 3 ]]; then
  echo "Usage: $0 <cloudprovider> <unpacked_image> <packed_image>"
  exit 1
fi

pack "${1}" "${2}" "${3}"
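A usage sketch for pack(); the script name is an assumption. GCP expects a gzipped oldgnu tarball containing disk.raw, Azure a fixed-size VHD:

  ./pack.sh gcp image.raw image.tar.gz
  ./pack.sh azure image.raw image.vhd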
@@ -6,9 +6,9 @@
set -euo pipefail
shopt -s inherit_errexit

if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi

CONTAINERS_JSON=$(mktemp /tmp/containers-XXXXXXXXXXXXXX.json)
@@ -16,91 +16,91 @@ declare -A AMI_FOR_REGION
AMI_OUTPUT=$1

import_status() {
  local import_task_id=$1
  aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${import_task_id}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.Status'
}

wait_for_import() {
  local import_task_id=$1
  local status
  echo -n "Waiting for import to finish"
  while true; do
    local status
    status=$(import_status "${import_task_id}")
    case "${status}" in
    completed)
      echo -e "\nImport completed."
      break
      ;;
    active)
      echo -n "."
      sleep 5
      ;;
    *)
      echo "Unexpected status: ${status}"
      exit 1
      ;;
    esac
  done
}

wait_for_image_available() {
  local ami_id=$1
  local region=$2
  echo -n "Waiting for image ${ami_id} to be available"
  while true; do
    # Waiter ImageAvailable failed: Max attempts exceeded
    local status
    status=$(aws ec2 wait image-available \
      --region "${region}" \
      --image-ids "${ami_id}" 2>&1 || true)
    case "${status}" in
    "")
      echo -e "\nImage available."
      break
      ;;
    *"Max attempts exceeded"*)
      echo -n "."
      ;;
    *)
      echo "Unexpected status: ${status}"
      exit 1
      ;;
    esac
  done
}

tag_ami_with_backing_snapshot() {
  local ami_id=$1
  local region=$2
  wait_for_image_available "${ami_id}" "${region}"
  local snapshot_id
  snapshot_id=$(aws ec2 describe-images \
    --region "${region}" \
    --image-ids "${ami_id}" \
    --output text --query "Images[0].BlockDeviceMappings[0].Ebs.SnapshotId")
  aws ec2 create-tags \
    --region "${region}" \
    --resources "${ami_id}" "${snapshot_id}" \
    --tags "Key=Name,Value=${AWS_IMAGE_NAME}"
}

make_ami_public() {
  local ami_id=$1
  local region=$2
  if [[ ${AWS_PUBLISH-} != "true" ]]; then
    return
  fi
  aws ec2 modify-image-attribute \
    --region "${region}" \
    --image-id "${ami_id}" \
    --launch-permission "Add=[{Group=all}]"
}

create_ami_from_raw_disk() {
  echo "Uploading raw disk image to S3"
  aws s3 cp "${AWS_IMAGE_PATH}" "s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}" --no-progress
  printf '{
    "Description": "%s",
    "Format": "raw",
    "UserBucket": {
@@ -108,61 +108,60 @@ create_ami_from_raw_disk() {
      "S3Key": "%s"
    }
  }' "${AWS_IMAGE_NAME}" "${AWS_BUCKET}" "${AWS_IMAGE_FILENAME}" > "${CONTAINERS_JSON}"
  IMPORT_SNAPSHOT=$(aws ec2 import-snapshot --region "${AWS_REGION}" --disk-container "file://${CONTAINERS_JSON}")
  echo "${IMPORT_SNAPSHOT}"
  IMPORT_TASK_ID=$(echo "${IMPORT_SNAPSHOT}" | jq -r '.ImportTaskId')
  aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}"
  wait_for_import "${IMPORT_TASK_ID}"
  AWS_SNAPSHOT=$(aws ec2 describe-import-snapshot-tasks --region "${AWS_REGION}" --import-task-ids "${IMPORT_TASK_ID}" | jq -r '.ImportSnapshotTasks[0].SnapshotTaskDetail.SnapshotId')
  echo "Deleting raw disk image from S3"
  aws s3 rm "s3://${AWS_BUCKET}/${AWS_IMAGE_FILENAME}"
  rm "${CONTAINERS_JSON}"
  REGISTER_OUT=$(
    aws ec2 register-image \
      --region "${AWS_REGION}" \
      --name "${AWS_IMAGE_NAME}" \
      --boot-mode uefi \
      --architecture x86_64 \
      --root-device-name /dev/xvda \
      --block-device-mappings "DeviceName=/dev/xvda,Ebs={SnapshotId=${AWS_SNAPSHOT}}" \
      --ena-support \
      --tpm-support v2.0 \
      --uefi-data "$(cat "${AWS_EFIVARS_PATH}")"
  )
  IMAGE_ID=$(echo "${REGISTER_OUT}" | jq -r '.ImageId')
  AMI_FOR_REGION=(["${AWS_REGION}"]="${IMAGE_ID}")
  tag_ami_with_backing_snapshot "${IMAGE_ID}" "${AWS_REGION}"
  make_ami_public "${IMAGE_ID}" "${AWS_REGION}"
  echo "Imported initial AMI as ${IMAGE_ID} in ${AWS_REGION}"
}

replicate_ami() {
  local target_region=$1
  local replicated_image_out
  replicated_image_out=$(aws ec2 copy-image \
    --name "${AWS_IMAGE_NAME}" \
    --source-region "${AWS_REGION}" \
    --source-image-id "${IMAGE_ID}" \
    --region "${target_region}")
  local replicated_image_id
  replicated_image_id=$(echo "${replicated_image_out}" | jq -r '.ImageId')
  AMI_FOR_REGION["${target_region}"]=${replicated_image_id}
  echo "Replicated AMI as ${replicated_image_id} in ${target_region}"
}

create_ami_from_raw_disk
# replicate in parallel
for region in ${AWS_REPLICATION_REGIONS}; do
  replicate_ami "${region}"
done
# wait for all images to be available and tag + publish them
for region in ${AWS_REPLICATION_REGIONS}; do
  tag_ami_with_backing_snapshot "${AMI_FOR_REGION[${region}]}" "${region}"
  make_ami_public "${AMI_FOR_REGION[${region}]}" "${region}"
done
echo -n "{\"${AWS_REGION}\": \"${AMI_FOR_REGION[${AWS_REGION}]}\"" > "${AMI_OUTPUT}"
for region in ${AWS_REPLICATION_REGIONS}; do
  echo -n ", \"${region}\": \"${AMI_FOR_REGION[${region}]}\"" >> "${AMI_OUTPUT}"
done
echo "}" >> "${AMI_OUTPUT}"
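The trailing echo loop assembles a JSON map from region to AMI ID in ${AMI_OUTPUT}. With one replication region the resulting file would look roughly like this (the region names and AMI IDs below are placeholders, not values from the diff):

  cat "${AMI_OUTPUT}"
  {"eu-central-1": "ami-0123456789abcdef0", "us-east-2": "ami-0fedcba9876543210"}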
@@ -6,193 +6,192 @@
set -euo pipefail
shopt -s inherit_errexit

if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi

CREATE_SIG_VERSION=NO
POSITIONAL_ARGS=()

while [[ $# -gt 0 ]]; do
  case $1 in
  -g | --gallery)
    CREATE_SIG_VERSION=YES
    shift # past argument
    ;;
  --disk-name)
    AZURE_DISK_NAME="$2"
    shift # past argument
    shift # past value
    ;;
  -*)
    echo "Unknown option $1"
    exit 1
    ;;
  *)
    POSITIONAL_ARGS+=("$1") # save positional arg
    shift # past argument
    ;;
  esac
done

set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters

if [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then
  AZURE_DISK_SECURITY_TYPE=ConfidentialVM_VMGuestStateOnlyEncryptedWithPlatformKey
  AZURE_SIG_VERSION_ENCRYPTION_TYPE=EncryptedVMGuestStateOnlyWithPmk
elif [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVMSupported" ]]; then
  AZURE_DISK_SECURITY_TYPE=""
elif [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then
  AZURE_DISK_SECURITY_TYPE=TrustedLaunch
else
  echo "Unknown security type: ${AZURE_SECURITY_TYPE}"
  exit 1
fi

AZURE_CVM_ENCRYPTION_ARGS=""
if [[ -n ${AZURE_SIG_VERSION_ENCRYPTION_TYPE-} ]]; then
  AZURE_CVM_ENCRYPTION_ARGS=" --target-region-cvm-encryption "
  for _ in ${AZURE_REPLICATION_REGIONS}; do
    AZURE_CVM_ENCRYPTION_ARGS=" ${AZURE_CVM_ENCRYPTION_ARGS} ${AZURE_SIG_VERSION_ENCRYPTION_TYPE}, "
  done
fi
echo "Replicating image in ${AZURE_REPLICATION_REGIONS}"

AZURE_VMGS_PATH=$1
if [[ -z ${AZURE_VMGS_PATH} ]] && [[ ${AZURE_SECURITY_TYPE} == "ConfidentialVM" ]]; then
  echo "No VMGS path provided - using default ConfidentialVM VMGS"
  AZURE_VMGS_PATH="${BLOBS_DIR}/cvm-vmgs.vhd"
elif [[ -z ${AZURE_VMGS_PATH} ]] && [[ ${AZURE_SECURITY_TYPE} == "TrustedLaunch" ]]; then
  echo "No VMGS path provided - using default TrsutedLaunch VMGS"
  AZURE_VMGS_PATH="${BLOBS_DIR}/trusted-launch-vmgs.vhd"
fi

SIZE=$(wc -c "${AZURE_IMAGE_PATH}" | cut -d " " -f1)

create_disk_with_vmgs() {
  az disk create \
    -n "${AZURE_DISK_NAME}" \
    -g "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --upload-size-bytes "${SIZE}" \
    --sku standard_lrs \
    --upload-type UploadWithSecurityData \
    --security-type "${AZURE_DISK_SECURITY_TYPE}"
  az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
  az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']"
  DISK_SAS=$(az disk grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \
    --access-level Write --duration-in-seconds 86400 \
    ${AZURE_VMGS_PATH+"--secure-vm-guest-state-sas"})
  azcopy copy "${AZURE_IMAGE_PATH}" \
    "$(echo "${DISK_SAS}" | jq -r .accessSas)" \
    --blob-type PageBlob
  if [[ -z ${AZURE_VMGS_PATH} ]]; then
    echo "No VMGS path provided - skipping VMGS upload"
  else
    azcopy copy "${AZURE_VMGS_PATH}" \
      "$(echo "${DISK_SAS}" | jq -r .securityDataAccessSas)" \
      --blob-type PageBlob
  fi
  az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

create_disk_without_vmgs() {
  az disk create \
    -n "${AZURE_DISK_NAME}" \
    -g "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --upload-size-bytes "${SIZE}" \
    --sku standard_lrs \
    --upload-type Upload
  az disk wait --created -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
  az disk list --output table --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}']"
  DISK_SAS=$(az disk grant-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}" \
    --access-level Write --duration-in-seconds 86400)
  azcopy copy "${AZURE_IMAGE_PATH}" \
    "$(echo "${DISK_SAS}" | jq -r .accessSas)" \
    --blob-type PageBlob
  az disk revoke-access -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

create_disk() {
  if [[ -z ${AZURE_VMGS_PATH} ]]; then
    create_disk_without_vmgs
  else
    create_disk_with_vmgs
  fi
}

delete_disk() {
  az disk delete -y -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

create_image() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    return
  fi
  az image create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    -n "${AZURE_DISK_NAME}" \
    --hyper-v-generation V2 \
    --os-type Linux \
    --source "$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
}

delete_image() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    return
  fi
  az image delete -n "${AZURE_DISK_NAME}" -g "${AZURE_RESOURCE_GROUP_NAME}"
}

create_sig_version() {
  if [[ -n ${AZURE_VMGS_PATH} ]]; then
    local DISK
    DISK="$(az disk list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
    local SOURCE="--os-snapshot ${DISK}"
  else
    local IMAGE
    IMAGE="$(az image list --query "[?name == '${AZURE_DISK_NAME}' && resourceGroup == '${AZURE_RESOURCE_GROUP_NAME^^}'] | [0].id" --output tsv)"
    local SOURCE="--managed-image ${IMAGE}"
  fi
  az sig create -l "${AZURE_REGION}" --gallery-name "${AZURE_GALLERY_NAME}" --resource-group "${AZURE_RESOURCE_GROUP_NAME}" || true
  az sig image-definition create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --gallery-name "${AZURE_GALLERY_NAME}" \
    --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
    --publisher "${AZURE_PUBLISHER}" \
    --offer "${AZURE_IMAGE_OFFER}" \
    --sku "${AZURE_SKU}" \
    --os-type Linux \
    --os-state generalized \
    --hyper-v-generation V2 \
    --features SecurityType="${AZURE_SECURITY_TYPE}" || true
  az sig image-version create \
    --resource-group "${AZURE_RESOURCE_GROUP_NAME}" \
    -l "${AZURE_REGION}" \
    --gallery-name "${AZURE_GALLERY_NAME}" \
    --gallery-image-definition "${AZURE_IMAGE_DEFINITION}" \
    --gallery-image-version "${AZURE_IMAGE_VERSION}" \
    --target-regions "${AZURE_REPLICATION_REGIONS}" \
    "${AZURE_CVM_ENCRYPTION_ARGS}" \
    --replica-count 1 \
    --replication-mode Full \
    "${SOURCE}"
}

create_disk

if [[ ${CREATE_SIG_VERSION} == "YES" ]]; then
  create_image
  create_sig_version
  delete_image
  delete_disk
fi
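A hedged invocation sketch for the Azure upload script above; the script name is an assumption. The VMGS blob is passed as the positional argument, -g additionally publishes the disk as a Shared Image Gallery version, and an empty path falls back to the bundled default VMGS for ConfidentialVM and TrustedLaunch security types.

  ./upload-azure.sh -g --disk-name constellation-image "${BLOBS_DIR}/cvm-vmgs.vhd"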
@@ -6,9 +6,9 @@
set -euo pipefail
shopt -s inherit_errexit

if [[ -z ${CONFIG_FILE-} ]] && [[ -f ${CONFIG_FILE-} ]]; then
  # shellcheck source=/dev/null
  . "${CONFIG_FILE}"
fi

PK_FILE=${PKI}/PK.cer
@@ -19,15 +19,15 @@ gsutil mb -l "${GCP_REGION}" "gs://${GCP_BUCKET}" || true
gsutil pap set enforced "gs://${GCP_BUCKET}" || true
gsutil cp "${GCP_IMAGE_PATH}" "gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}"
gcloud compute images create "${GCP_IMAGE_NAME}" \
  "--family=${GCP_IMAGE_FAMILY}" \
  "--source-uri=gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}" \
  "--guest-os-features=GVNIC,SEV_CAPABLE,VIRTIO_SCSI_MULTIQUEUE,UEFI_COMPATIBLE" \
  "--platform-key-file=${PK_FILE}" \
  "--key-exchange-key-file=${KEK_FILES}" \
  "--signature-database-file=${DB_FILES}" \
  "--project=${GCP_PROJECT}"
gcloud compute images add-iam-policy-binding "${GCP_IMAGE_NAME}" \
  "--project=${GCP_PROJECT}" \
  --member='allAuthenticatedUsers' \
  --role='roles/compute.imageUser'
gsutil rm "gs://${GCP_BUCKET}/${GCP_IMAGE_FILENAME}"
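After the import, the created image can be verified with gcloud, reusing the same variables; a short sketch:

  gcloud compute images describe "${GCP_IMAGE_NAME}" --project="${GCP_PROJECT}" --format='value(name,status)'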