diff --git a/.github/actions/e2e_lb/action.yml b/.github/actions/e2e_lb/action.yml
index 1fd20f412..1a776e426 100644
--- a/.github/actions/e2e_lb/action.yml
+++ b/.github/actions/e2e_lb/action.yml
@@ -5,6 +5,9 @@ inputs:
kubeconfig:
description: "The kubeconfig of the cluster to test."
required: true
+ cloudProvider:
+ description: "The CSP this test runs on. Some tests exercise functionality not supported everywhere."
+ required: false
runs:
using: "composite"
@@ -20,6 +23,24 @@ runs:
kubectl apply -f lb.yml
bazel run //e2e/internal/lb:lb_test
+ - name: Test AWS Ingress
+ if: inputs.cloudProvider == 'aws'
+ shell: bash
+ env:
+ KUBECONFIG: ${{ inputs.kubeconfig }}
+ working-directory: ./.github/actions/e2e_lb
+ run: |
+ kubectl apply -f aws-ingress.yml
+ kubectl wait -n lb-test ing/whoami --for=jsonpath='{.status.loadBalancer.ingress}' --timeout=5m
+ host=$(kubectl get -n lb-test ingress whoami -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+ for i in $(seq 30); do
+ curl --silent --fail --connect-timeout 5 --output /dev/null "http://${host}" && exit 0
+ sleep 10
+ done
+ echo "::error::Ingress did not become ready in the allotted time."
+ kubectl describe ing -n lb-test
+ exit 1
+
- name: Delete deployment
if: always()
shell: bash
@@ -28,4 +49,5 @@ runs:
working-directory: ./.github/actions/e2e_lb
run: |
kubectl delete -f lb.yml
+ kubectl delete --ignore-not-found -f aws-ingress.yml
kubectl delete -f ns.yml --timeout=5m
diff --git a/.github/actions/e2e_lb/aws-ingress.yml b/.github/actions/e2e_lb/aws-ingress.yml
new file mode 100644
index 000000000..641ecffc0
--- /dev/null
+++ b/.github/actions/e2e_lb/aws-ingress.yml
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: whoami-internal
+ namespace: lb-test
+spec:
+ selector:
+ app: whoami
+ ports:
+ - port: 80
+ targetPort: 80
+ type: NodePort
+
+---
+
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+ namespace: lb-test
+ name: whoami
+ annotations:
+ alb.ingress.kubernetes.io/scheme: internet-facing
+ alb.ingress.kubernetes.io/target-type: instance
+spec:
+ ingressClassName: alb
+ rules:
+ - http:
+ paths:
+ - path: /
+ pathType: Prefix
+ backend:
+ service:
+ name: whoami-internal
+ port:
+ number: 80
\ No newline at end of file
diff --git a/.github/actions/e2e_test/action.yml b/.github/actions/e2e_test/action.yml
index 36d352b07..36f6d3338 100644
--- a/.github/actions/e2e_test/action.yml
+++ b/.github/actions/e2e_test/action.yml
@@ -365,6 +365,7 @@ runs:
uses: ./.github/actions/e2e_lb
with:
kubeconfig: ${{ steps.constellation-create.outputs.kubeconfig }}
+ cloudProvider: ${{ inputs.cloudProvider }}
- name: Run Performance Benchmark
if: inputs.test == 'perf-bench'
diff --git a/.github/actions/versionsapi/Dockerfile b/.github/actions/versionsapi/Dockerfile
index 759170058..b1018466a 100644
--- a/.github/actions/versionsapi/Dockerfile
+++ b/.github/actions/versionsapi/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.22.0@sha256:7b297d9abee021bab9046e492506b3c2da8a3722cbf301653186545ecc1e00bb as builder
+FROM golang:1.22.1@sha256:34ce21a9696a017249614876638ea37ceca13cdd88f582caad06f87a8aa45bf3 as builder
# Download project root dependencies
WORKDIR /workspace
diff --git a/.github/workflows/build-ccm-gcp.yml b/.github/workflows/build-ccm-gcp.yml
index 312bd4a90..52d33a5af 100644
--- a/.github/workflows/build-ccm-gcp.yml
+++ b/.github/workflows/build-ccm-gcp.yml
@@ -31,7 +31,7 @@ jobs:
- name: Setup Go environment
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: "1.22.0"
+ go-version: "1.22.1"
cache: false
- name: Install Crane
diff --git a/.github/workflows/build-os-image-scheduled.yml b/.github/workflows/build-os-image-scheduled.yml
index 577cb9f29..5e3d79c45 100644
--- a/.github/workflows/build-os-image-scheduled.yml
+++ b/.github/workflows/build-os-image-scheduled.yml
@@ -69,7 +69,7 @@ jobs:
- name: Setup Go environment
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: "1.22.0"
+ go-version: "1.22.1"
cache: false
- name: Determine version
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 6fa0c6a9e..de17bf19c 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -40,7 +40,7 @@ jobs:
if: matrix.language == 'go'
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: "1.22.0"
+ go-version: "1.22.1"
cache: false
- name: Initialize CodeQL
diff --git a/.github/workflows/draft-release.yml b/.github/workflows/draft-release.yml
index e80c88843..fa0821e3d 100644
--- a/.github/workflows/draft-release.yml
+++ b/.github/workflows/draft-release.yml
@@ -316,14 +316,14 @@ jobs:
- provenance-subjects
# This must not be pinned to digest. See:
# https://github.com/slsa-framework/slsa-github-generator#referencing-slsa-builders-and-generators
- uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.9.0
+ uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.10.0
with:
base64-subjects: "${{ needs.provenance-subjects.outputs.provenance-subjects }}"
provenance-verify:
runs-on: ubuntu-22.04
env:
- SLSA_VERIFIER_VERSION: "2.0.1"
+ SLSA_VERIFIER_VERSION: "2.5.1"
needs:
- build-cli
- provenance
diff --git a/.github/workflows/e2e-test-provider-example.yml b/.github/workflows/e2e-test-provider-example.yml
index 6f2d92063..5359358c8 100644
--- a/.github/workflows/e2e-test-provider-example.yml
+++ b/.github/workflows/e2e-test-provider-example.yml
@@ -83,14 +83,6 @@ jobs:
ref: main
stream: nightly
- - name: Create resource prefix
- id: create-prefix
- shell: bash
- run: |
- run_id=${{ github.run_id }}
- last_three="${run_id: -3}"
- echo "prefix=e2e-${last_three}" | tee -a "$GITHUB_OUTPUT"
-
- name: Determine cloudprovider from attestation variant
id: determine
shell: bash
@@ -124,6 +116,16 @@ jobs:
buildBuddyApiKey: ${{ secrets.BUILDBUDDY_ORG_API_KEY }}
nixTools: terraform
+ - name: Create prefix
+ id: create-prefix
+ shell: bash
+ run: |
+ uuid=$(uuidgen | tr "[:upper:]" "[:lower:]")
+ uuid="${uuid%%-*}"
+ uuid="${uuid: -3}" # Final resource name must be no longer than 10 characters on AWS
+ echo "uuid=${uuid}" | tee -a "${GITHUB_OUTPUT}"
+ echo "prefix=e2e-${uuid}" | tee -a "${GITHUB_OUTPUT}"
+
- name: Build Constellation provider and CLI # CLI is needed for the upgrade assert and container push is needed for the microservice upgrade
working-directory: ${{ github.workspace }}
id: build
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index cda56fea0..a09cbff11 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -233,7 +233,7 @@ jobs:
- name: Setup Go environment
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: "1.22.0"
+ go-version: "1.22.1"
cache: true
- name: Build generateMeasurements tool
diff --git a/.github/workflows/test-operator-codegen.yml b/.github/workflows/test-operator-codegen.yml
index d8d583b9b..028ef981c 100644
--- a/.github/workflows/test-operator-codegen.yml
+++ b/.github/workflows/test-operator-codegen.yml
@@ -28,7 +28,7 @@ jobs:
- name: Setup Go environment
uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0
with:
- go-version: "1.22.0"
+ go-version: "1.22.1"
cache: true
- name: Run code generation
diff --git a/3rdparty/bazel/org_golang/BUILD.bazel b/3rdparty/bazel/org_golang/BUILD.bazel
new file mode 100644
index 000000000..dc940d416
--- /dev/null
+++ b/3rdparty/bazel/org_golang/BUILD.bazel
@@ -0,0 +1 @@
+exports_files(["go_tls_max_handshake_size.patch"])
diff --git a/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch b/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch
new file mode 100644
index 000000000..ac2da752f
--- /dev/null
+++ b/3rdparty/bazel/org_golang/go_tls_max_handshake_size.patch
@@ -0,0 +1,11 @@
+--- src/crypto/tls/common.go
++++ src/crypto/tls/common.go
+@@ -62,7 +62,7 @@
+ maxCiphertext = 16384 + 2048 // maximum ciphertext payload length
+ maxCiphertextTLS13 = 16384 + 256 // maximum ciphertext length in TLS 1.3
+ recordHeaderLen = 5 // record header length
+- maxHandshake = 65536 // maximum handshake we support (protocol max is 16 MB)
++ maxHandshake = 262144 // maximum handshake we support (protocol max is 16 MB)
+ maxUselessRecords = 16 // maximum number of consecutive non-advancing records
+ )
+
diff --git a/3rdparty/gcp-guest-agent/Dockerfile b/3rdparty/gcp-guest-agent/Dockerfile
index 178da4463..e435bfbc3 100644
--- a/3rdparty/gcp-guest-agent/Dockerfile
+++ b/3rdparty/gcp-guest-agent/Dockerfile
@@ -6,7 +6,7 @@ RUN apt-get update && apt-get install -y \
git
# Install Go
-ARG GO_VER=1.22.0
+ARG GO_VER=1.22.1
RUN wget -q https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \
tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \
rm go${GO_VER}.linux-amd64.tar.gz
diff --git a/WORKSPACE.bazel b/WORKSPACE.bazel
index 51db5ca5e..45eff59e0 100644
--- a/WORKSPACE.bazel
+++ b/WORKSPACE.bazel
@@ -165,11 +165,17 @@ load("//bazel/toolchains:go_module_deps.bzl", "go_dependencies")
# gazelle:repository_macro bazel/toolchains/go_module_deps.bzl%go_dependencies
go_dependencies()
-load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies")
+load("@io_bazel_rules_go//go:deps.bzl", "go_download_sdk", "go_register_toolchains", "go_rules_dependencies")
+
+go_download_sdk(
+ name = "go_sdk",
+ patches = ["//3rdparty/bazel/org_golang:go_tls_max_handshake_size.patch"],
+ version = "1.22.3",
+)
go_rules_dependencies()
-go_register_toolchains(version = "1.22.0")
+go_register_toolchains()
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
diff --git a/bazel/ci/license_header.sh.in b/bazel/ci/license_header.sh.in
index 5dba4e4a4..4e5ce470c 100644
--- a/bazel/ci/license_header.sh.in
+++ b/bazel/ci/license_header.sh.in
@@ -26,7 +26,7 @@ noHeader=$(
--exclude-dir 3rdparty \
--exclude-dir build \
-e'SPDX-License-Identifier: AGPL-3.0-only' \
- -e'DO NOT EDIT'
+ -e'DO NOT EDIT' | { grep -v internal/cloud/openstack/clouds || true; }
)
if [[ -z ${noHeader} ]]; then
diff --git a/bazel/toolchains/go_module_deps.bzl b/bazel/toolchains/go_module_deps.bzl
index c273f17c1..a958dd730 100644
--- a/bazel/toolchains/go_module_deps.bzl
+++ b/bazel/toolchains/go_module_deps.bzl
@@ -1355,8 +1355,8 @@ def go_dependencies():
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "github.com/docker/docker",
- sum = "h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=",
- version = "v25.0.3+incompatible",
+ sum = "h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=",
+ version = "v25.0.5+incompatible",
)
go_repository(
name = "com_github_docker_docker_credential_helpers",
@@ -6933,16 +6933,16 @@ def go_dependencies():
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "google.golang.org/protobuf",
- sum = "h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=",
- version = "v1.32.0",
+ sum = "h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=",
+ version = "v1.33.0",
)
go_repository(
name = "org_golang_x_crypto",
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/crypto",
- sum = "h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=",
- version = "v0.19.0",
+ sum = "h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=",
+ version = "v0.21.0",
)
go_repository(
name = "org_golang_x_exp",
@@ -6989,8 +6989,8 @@ def go_dependencies():
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/net",
- sum = "h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=",
- version = "v0.21.0",
+ sum = "h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=",
+ version = "v0.23.0",
)
go_repository(
name = "org_golang_x_oauth2",
@@ -7013,8 +7013,8 @@ def go_dependencies():
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/sys",
- sum = "h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=",
- version = "v0.17.0",
+ sum = "h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=",
+ version = "v0.18.0",
)
go_repository(
name = "org_golang_x_telemetry",
@@ -7029,8 +7029,8 @@ def go_dependencies():
build_file_generation = "on",
build_file_proto_mode = "disable_global",
importpath = "golang.org/x/term",
- sum = "h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=",
- version = "v0.17.0",
+ sum = "h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=",
+ version = "v0.18.0",
)
go_repository(
name = "org_golang_x_text",
diff --git a/bootstrapper/initproto/init.pb.go b/bootstrapper/initproto/init.pb.go
index 49401ec0a..e2d1e2cf6 100644
--- a/bootstrapper/initproto/init.pb.go
+++ b/bootstrapper/initproto/init.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: bootstrapper/initproto/init.proto
diff --git a/bootstrapper/internal/joinclient/joinclient.go b/bootstrapper/internal/joinclient/joinclient.go
index 110b52a66..8f44fa115 100644
--- a/bootstrapper/internal/joinclient/joinclient.go
+++ b/bootstrapper/internal/joinclient/joinclient.go
@@ -150,6 +150,7 @@ func (c *JoinClient) Start(cleaner cleaner) {
return
} else if isUnrecoverable(err) {
c.log.With(slog.Any("error", err)).Error("Unrecoverable error occurred")
+ // TODO(burgerdev): this should eventually lead to a full node reset
return
}
c.log.With(slog.Any("error", err)).Warn("Join failed for all available endpoints")
@@ -310,7 +311,15 @@ func (c *JoinClient) startNodeAndJoin(ticket *joinproto.IssueJoinTicketResponse,
CACertHashes: []string{ticket.DiscoveryTokenCaCertHash},
}
- if err := c.joiner.JoinCluster(ctx, btd, c.role, ticket.KubernetesComponents, c.log); err != nil {
+ // We currently cannot recover from any failure in this function. Joining the k8s cluster
+ // sometimes fails transiently, and we don't want to brick the node because of that.
+ for i := 0; i < 3; i++ {
+ if err = c.joiner.JoinCluster(ctx, btd, c.role, ticket.KubernetesComponents, c.log); err == nil {
+ break
+ }
+ c.log.Error("failed to join k8s cluster", "role", c.role, "attempt", i, "error", err)
+ }
+ if err != nil {
return fmt.Errorf("joining Kubernetes cluster: %w", err)
}
diff --git a/bootstrapper/internal/joinclient/joinclient_test.go b/bootstrapper/internal/joinclient/joinclient_test.go
index 4684b2eb4..d22ed4fb9 100644
--- a/bootstrapper/internal/joinclient/joinclient_test.go
+++ b/bootstrapper/internal/joinclient/joinclient_test.go
@@ -62,6 +62,7 @@ func TestClient(t *testing.T) {
apiAnswers []any
wantLock bool
wantJoin bool
+ wantNumJoins int
}{
"on worker: metadata self: errors occur": {
role: role.Worker,
@@ -168,12 +169,26 @@ func TestClient(t *testing.T) {
listAnswer{instances: peers},
issueJoinTicketAnswer{},
},
- clusterJoiner: &stubClusterJoiner{joinClusterErr: someErr},
+ clusterJoiner: &stubClusterJoiner{numBadCalls: -1, joinClusterErr: someErr},
nodeLock: newFakeLock(),
disk: &stubDisk{},
wantJoin: true,
wantLock: true,
},
+ "on control plane: joinCluster fails transiently": {
+ role: role.ControlPlane,
+ apiAnswers: []any{
+ selfAnswer{instance: controlSelf},
+ listAnswer{instances: peers},
+ issueJoinTicketAnswer{},
+ },
+ clusterJoiner: &stubClusterJoiner{numBadCalls: 1, joinClusterErr: someErr},
+ nodeLock: newFakeLock(),
+ disk: &stubDisk{},
+ wantJoin: true,
+ wantLock: true,
+ wantNumJoins: 2,
+ },
"on control plane: node already locked": {
role: role.ControlPlane,
apiAnswers: []any{
@@ -250,9 +265,12 @@ func TestClient(t *testing.T) {
client.Stop()
if tc.wantJoin {
- assert.True(tc.clusterJoiner.joinClusterCalled)
+ assert.Greater(tc.clusterJoiner.joinClusterCalled, 0)
} else {
- assert.False(tc.clusterJoiner.joinClusterCalled)
+ assert.Equal(0, tc.clusterJoiner.joinClusterCalled)
+ }
+ if tc.wantNumJoins > 0 {
+ assert.GreaterOrEqual(tc.clusterJoiner.joinClusterCalled, tc.wantNumJoins)
}
if tc.wantLock {
assert.False(client.nodeLock.TryLockOnce(nil)) // lock should be locked
@@ -398,12 +416,17 @@ type issueJoinTicketAnswer struct {
}
type stubClusterJoiner struct {
- joinClusterCalled bool
+ joinClusterCalled int
+ numBadCalls int
joinClusterErr error
}
func (j *stubClusterJoiner) JoinCluster(context.Context, *kubeadm.BootstrapTokenDiscovery, role.Role, components.Components, *slog.Logger) error {
- j.joinClusterCalled = true
+ j.joinClusterCalled++
+ if j.numBadCalls == 0 {
+ return nil
+ }
+ j.numBadCalls--
return j.joinClusterErr
}
diff --git a/cli/internal/cloudcmd/BUILD.bazel b/cli/internal/cloudcmd/BUILD.bazel
index 83b394338..946322495 100644
--- a/cli/internal/cloudcmd/BUILD.bazel
+++ b/cli/internal/cloudcmd/BUILD.bazel
@@ -25,6 +25,7 @@ go_library(
"//internal/cloud/cloudprovider",
"//internal/cloud/gcpshared",
"//internal/cloud/openstack",
+ "//internal/cloud/openstack/clouds",
"//internal/config",
"//internal/constants",
"//internal/constellation",
diff --git a/cli/internal/cloudcmd/serviceaccount.go b/cli/internal/cloudcmd/serviceaccount.go
index 994aaa5b0..7c54a0b9f 100644
--- a/cli/internal/cloudcmd/serviceaccount.go
+++ b/cli/internal/cloudcmd/serviceaccount.go
@@ -13,6 +13,7 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
+ "github.com/edgelesssys/constellation/v2/internal/cloud/openstack/clouds"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constellation"
"github.com/edgelesssys/constellation/v2/internal/file"
@@ -38,15 +39,23 @@ func GetMarshaledServiceAccountURI(config *config.Config, fileHandler file.Handl
}
case cloudprovider.OpenStack:
+ cloudsYAML, err := clouds.ReadCloudsYAML(fileHandler, config.Provider.OpenStack.CloudsYAMLPath)
+ if err != nil {
+ return "", fmt.Errorf("reading clouds.yaml: %w", err)
+ }
+ cloud, ok := cloudsYAML.Clouds[config.Provider.OpenStack.Cloud]
+ if !ok {
+ return "", fmt.Errorf("cloud %q not found in clouds.yaml", config.Provider.OpenStack.Cloud)
+ }
payload.OpenStack = openstack.AccountKey{
- AuthURL: config.Provider.OpenStack.AuthURL,
- Username: config.Provider.OpenStack.Username,
- Password: config.Provider.OpenStack.Password,
- ProjectID: config.Provider.OpenStack.ProjectID,
- ProjectName: config.Provider.OpenStack.ProjectName,
- UserDomainName: config.Provider.OpenStack.UserDomainName,
- ProjectDomainName: config.Provider.OpenStack.ProjectDomainName,
- RegionName: config.Provider.OpenStack.RegionName,
+ AuthURL: cloud.AuthInfo.AuthURL,
+ Username: cloud.AuthInfo.Username,
+ Password: cloud.AuthInfo.Password,
+ ProjectID: cloud.AuthInfo.ProjectID,
+ ProjectName: cloud.AuthInfo.ProjectName,
+ UserDomainName: cloud.AuthInfo.UserDomainName,
+ ProjectDomainName: cloud.AuthInfo.ProjectDomainName,
+ RegionName: cloud.RegionName,
}
}
diff --git a/cli/internal/cloudcmd/tfvars.go b/cli/internal/cloudcmd/tfvars.go
index ea53cff5d..309632d98 100644
--- a/cli/internal/cloudcmd/tfvars.go
+++ b/cli/internal/cloudcmd/tfvars.go
@@ -257,11 +257,9 @@ func openStackTerraformVars(conf *config.Config, imageRef string) (*terraform.Op
return &terraform.OpenStackClusterVariables{
Name: conf.Name,
Cloud: toPtr(conf.Provider.OpenStack.Cloud),
+ OpenStackCloudsYAMLPath: conf.Provider.OpenStack.CloudsYAMLPath,
FloatingIPPoolID: conf.Provider.OpenStack.FloatingIPPoolID,
ImageID: imageRef,
- OpenstackUserDomainName: conf.Provider.OpenStack.UserDomainName,
- OpenstackUsername: conf.Provider.OpenStack.Username,
- OpenstackPassword: conf.Provider.OpenStack.Password,
Debug: conf.IsDebugCluster(),
NodeGroups: nodeGroups,
CustomEndpoint: conf.CustomEndpoint,
diff --git a/cli/internal/cmd/apply.go b/cli/internal/cmd/apply.go
index ea7679a50..2db2f318c 100644
--- a/cli/internal/cmd/apply.go
+++ b/cli/internal/cmd/apply.go
@@ -40,7 +40,7 @@ import (
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
"github.com/edgelesssys/constellation/v2/internal/semver"
"github.com/edgelesssys/constellation/v2/internal/versions"
- "github.com/samber/slog-multi"
+ slogmulti "github.com/samber/slog-multi"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
@@ -365,7 +365,7 @@ func (a *applyCmd) apply(
}
// Check license
- a.checkLicenseFile(cmd, conf.GetProvider())
+ a.checkLicenseFile(cmd, conf.GetProvider(), conf.UseMarketplaceImage())
// Now start actually running the apply command
@@ -449,7 +449,7 @@ func (a *applyCmd) apply(
func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationconfigapi.Fetcher) (*config.Config, *state.State, error) {
// Read user's config and state file
- a.log.Debug(fmt.Sprintf("Reading config from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
+ a.log.Debug(fmt.Sprintf("Reading config from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
conf, err := config.New(a.fileHandler, constants.ConfigFilename, configFetcher, a.flags.force)
var configValidationErr *config.ValidationError
if errors.As(err, &configValidationErr) {
@@ -459,7 +459,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
return nil, nil, err
}
- a.log.Debug(fmt.Sprintf("Reading state file from %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
+ a.log.Debug(fmt.Sprintf("Reading state file from %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
stateFile, err := state.CreateOrRead(a.fileHandler, constants.StateFilename)
if err != nil {
return nil, nil, err
@@ -528,10 +528,10 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
// If we need to run the init RPC, the version has to be valid
// Otherwise, we are able to use an outdated version, meaning we skip the K8s upgrade
// We skip version validation if the user explicitly skips the Kubernetes phase
- a.log.Debug(fmt.Sprintf("Validating Kubernetes version %s", conf.KubernetesVersion))
+ a.log.Debug(fmt.Sprintf("Validating Kubernetes version %q", conf.KubernetesVersion))
validVersion, err := versions.NewValidK8sVersion(string(conf.KubernetesVersion), true)
if err != nil {
- a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %s", err))
+ a.log.Debug(fmt.Sprintf("Kubernetes version not valid: %q", err))
if !a.flags.skipPhases.contains(skipInitPhase) {
return nil, nil, err
}
@@ -570,7 +570,7 @@ func (a *applyCmd) validateInputs(cmd *cobra.Command, configFetcher attestationc
cmd.PrintErrf("Warning: Constellation with Kubernetes %s is still in preview. Use only for evaluation purposes.\n", validVersion)
}
conf.KubernetesVersion = validVersion
- a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %s", conf.KubernetesVersion))
+ a.log.Debug(fmt.Sprintf("Target Kubernetes version set to %q", conf.KubernetesVersion))
// Validate microservice version (helm versions) in the user's config matches the version of the CLI
// This makes sure we catch potential errors early, not just after we already ran Terraform migrations or the init RPC
@@ -598,7 +598,7 @@ func (a *applyCmd) applyJoinConfig(cmd *cobra.Command, newConfig config.Attestat
) error {
clusterAttestationConfig, err := a.applier.GetClusterAttestationConfig(cmd.Context(), newConfig.GetVariant())
if err != nil {
- a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %s", err))
+ a.log.Debug(fmt.Sprintf("Getting cluster attestation config failed: %q", err))
if k8serrors.IsNotFound(err) {
a.log.Debug("Creating new join config")
return a.applier.ApplyJoinConfig(cmd.Context(), newConfig, measurementSalt)
@@ -844,7 +844,7 @@ type applier interface {
// methods required to install/upgrade Helm charts
PrepareHelmCharts(
- flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret,
) (helm.Applier, bool, error)
// methods to interact with Kubernetes
diff --git a/cli/internal/cmd/apply_test.go b/cli/internal/cmd/apply_test.go
index a177cd1d4..064e1f42b 100644
--- a/cli/internal/cmd/apply_test.go
+++ b/cli/internal/cmd/apply_test.go
@@ -554,7 +554,7 @@ func (s *stubConstellApplier) Init(context.Context, atls.Validator, *state.State
type helmApplier interface {
PrepareHelmCharts(
- flags helm.Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ flags helm.Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret,
) (
helm.Applier, bool, error)
}
diff --git a/cli/internal/cmd/applyhelm.go b/cli/internal/cmd/applyhelm.go
index 79ae2a6d7..bd629d348 100644
--- a/cli/internal/cmd/applyhelm.go
+++ b/cli/internal/cmd/applyhelm.go
@@ -43,6 +43,18 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi
ApplyTimeout: a.flags.helmTimeout,
AllowDestructive: helm.DenyDestructive,
}
+ if conf.Provider.OpenStack != nil {
+ var deployYawolLoadBalancer bool
+ if conf.Provider.OpenStack.DeployYawolLoadBalancer != nil {
+ deployYawolLoadBalancer = *conf.Provider.OpenStack.DeployYawolLoadBalancer
+ }
+ options.OpenStackValues = &helm.OpenStackValues{
+ DeployYawolLoadBalancer: deployYawolLoadBalancer,
+ FloatingIPPoolID: conf.Provider.OpenStack.FloatingIPPoolID,
+ YawolFlavorID: conf.Provider.OpenStack.YawolFlavorID,
+ YawolImageID: conf.Provider.OpenStack.YawolImageID,
+ }
+ }
a.log.Debug("Getting service account URI")
serviceAccURI, err := cloudcmd.GetMarshaledServiceAccountURI(conf, a.fileHandler)
@@ -51,7 +63,7 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi
}
a.log.Debug("Preparing Helm charts")
- executor, includesUpgrades, err := a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret, conf.Provider.OpenStack)
+ executor, includesUpgrades, err := a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret)
if errors.Is(err, helm.ErrConfirmationMissing) {
if !a.flags.yes {
cmd.PrintErrln("WARNING: Upgrading cert-manager will destroy all custom resources you have manually created that are based on the current version of cert-manager.")
@@ -65,7 +77,7 @@ func (a *applyCmd) runHelmApply(cmd *cobra.Command, conf *config.Config, stateFi
}
}
options.AllowDestructive = helm.AllowDestructive
- executor, includesUpgrades, err = a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret, conf.Provider.OpenStack)
+ executor, includesUpgrades, err = a.applier.PrepareHelmCharts(options, stateFile, serviceAccURI, masterSecret)
}
var upgradeErr *compatibility.InvalidUpgradeError
if err != nil {
@@ -108,7 +120,7 @@ func (a *applyCmd) backupHelmCharts(
if err := executor.SaveCharts(chartDir, a.fileHandler); err != nil {
return fmt.Errorf("saving Helm charts to disk: %w", err)
}
- a.log.Debug(fmt.Sprintf("Helm charts saved to %s", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)))
+ a.log.Debug(fmt.Sprintf("Helm charts saved to %q", a.flags.pathPrefixer.PrefixPrintablePath(chartDir)))
if includesUpgrades {
a.log.Debug("Creating backup of CRDs and CRs")
diff --git a/cli/internal/cmd/applyinit.go b/cli/internal/cmd/applyinit.go
index 34ab7f1a9..e3e99b0b4 100644
--- a/cli/internal/cmd/applyinit.go
+++ b/cli/internal/cmd/applyinit.go
@@ -29,7 +29,7 @@ import (
// On success, it writes the Kubernetes admin config file to disk.
// Therefore it is skipped if the Kubernetes admin config file already exists.
func (a *applyCmd) runInit(cmd *cobra.Command, conf *config.Config, stateFile *state.State) (*bytes.Buffer, error) {
- a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
+ a.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(conf.GetAttestationConfig(), a.wLog)
if err != nil {
return nil, fmt.Errorf("creating validator: %w", err)
@@ -121,7 +121,7 @@ func (a *applyCmd) writeInitOutput(
if err := a.fileHandler.Write(constants.AdminConfFilename, initResp.Kubeconfig, file.OptNone); err != nil {
return fmt.Errorf("writing kubeconfig: %w", err)
}
- a.log.Debug(fmt.Sprintf("Kubeconfig written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)))
+ a.log.Debug(fmt.Sprintf("Kubeconfig written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.AdminConfFilename)))
if mergeConfig {
if err := a.merger.mergeConfigs(constants.AdminConfFilename, a.fileHandler); err != nil {
@@ -136,7 +136,7 @@ func (a *applyCmd) writeInitOutput(
return fmt.Errorf("writing Constellation state file: %w", err)
}
- a.log.Debug(fmt.Sprintf("Constellation state file written to %s", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
+ a.log.Debug(fmt.Sprintf("Constellation state file written to %q", a.flags.pathPrefixer.PrefixPrintablePath(constants.StateFilename)))
if !mergeConfig {
fmt.Fprintln(wr, "You can now connect to your cluster by executing:")
diff --git a/cli/internal/cmd/configfetchmeasurements.go b/cli/internal/cmd/configfetchmeasurements.go
index 04af8632c..83a8e55c2 100644
--- a/cli/internal/cmd/configfetchmeasurements.go
+++ b/cli/internal/cmd/configfetchmeasurements.go
@@ -104,7 +104,7 @@ func runConfigFetchMeasurements(cmd *cobra.Command, _ []string) error {
if err := cfm.flags.parse(cmd.Flags()); err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
- cfm.log.Debug(fmt.Sprintf("Using flags %+v", cfm.flags))
+ cfm.log.Debug("Using flags", "insecure", cfm.flags.insecure, "measurementsURL", cfm.flags.measurementsURL, "signatureURL", cfm.flags.signatureURL)
fetcher := attestationconfigapi.NewFetcherWithClient(http.DefaultClient, constants.CDNRepositoryURL)
return cfm.configFetchMeasurements(cmd, fileHandler, fetcher)
@@ -152,14 +152,14 @@ func (cfm *configFetchMeasurementsCmd) configFetchMeasurements(
return fmt.Errorf("fetching and verifying measurements: %w", err)
}
}
- cfm.log.Debug(fmt.Sprintf("Measurements: %#v\n", fetchedMeasurements))
+ cfm.log.Debug(fmt.Sprintf("Measurements: %s", fetchedMeasurements.String()))
cfm.log.Debug("Updating measurements in configuration")
conf.UpdateMeasurements(fetchedMeasurements)
if err := fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err
}
- cfm.log.Debug(fmt.Sprintf("Configuration written to %s", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
+ cfm.log.Debug(fmt.Sprintf("Configuration written to %q", cfm.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
cmd.Print("Successfully fetched measurements and updated Configuration\n")
return nil
}
diff --git a/cli/internal/cmd/configgenerate.go b/cli/internal/cmd/configgenerate.go
index 666b7284d..4fabe40e3 100644
--- a/cli/internal/cmd/configgenerate.go
+++ b/cli/internal/cmd/configgenerate.go
@@ -85,13 +85,13 @@ func runConfigGenerate(cmd *cobra.Command, args []string) error {
if err := cg.flags.parse(cmd.Flags()); err != nil {
return fmt.Errorf("parsing flags: %w", err)
}
- log.Debug(fmt.Sprintf("Parsed flags as %+v", cg.flags))
+ log.Debug("Using flags", "k8sVersion", cg.flags.k8sVersion, "attestationVariant", cg.flags.attestationVariant)
return cg.configGenerate(cmd, fileHandler, provider, args[0])
}
func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file.Handler, provider cloudprovider.Provider, rawProvider string) error {
- cg.log.Debug(fmt.Sprintf("Using cloud provider %s", provider.String()))
+ cg.log.Debug(fmt.Sprintf("Using cloud provider %q", provider.String()))
// Config creation
conf, err := createConfigWithAttestationVariant(provider, rawProvider, cg.flags.attestationVariant)
@@ -128,7 +128,7 @@ func (cg *configGenerateCmd) configGenerate(cmd *cobra.Command, fileHandler file
// createConfigWithAttestationVariant creates a config file for the given provider.
func createConfigWithAttestationVariant(provider cloudprovider.Provider, rawProvider string, attestationVariant variant.Variant) (*config.Config, error) {
- conf := config.Default().WithOpenStackProviderDefaults(rawProvider)
+ conf := config.Default().WithOpenStackProviderDefaults(provider, rawProvider)
conf.RemoveProviderExcept(provider)
// set a lower default for QEMU's state disk
diff --git a/cli/internal/cmd/configgenerate_test.go b/cli/internal/cmd/configgenerate_test.go
index 2533cffcb..d1a4fbc92 100644
--- a/cli/internal/cmd/configgenerate_test.go
+++ b/cli/internal/cmd/configgenerate_test.go
@@ -140,7 +140,7 @@ func TestConfigGenerateDefaultProviderSpecific(t *testing.T) {
fileHandler := file.NewHandler(afero.NewMemMapFs())
cmd := newConfigGenerateCmd()
- wantConf := config.Default().WithOpenStackProviderDefaults(tc.rawProvider)
+ wantConf := config.Default().WithOpenStackProviderDefaults(cloudprovider.OpenStack, tc.rawProvider)
wantConf.RemoveProviderAndAttestationExcept(tc.provider)
cg := &configGenerateCmd{
diff --git a/cli/internal/cmd/configinstancetypes.go b/cli/internal/cmd/configinstancetypes.go
index 0d768d2b1..555ad5bb2 100644
--- a/cli/internal/cmd/configinstancetypes.go
+++ b/cli/internal/cmd/configinstancetypes.go
@@ -38,6 +38,8 @@ Azure Trusted Launch instance types:
%v
GCP instance types:
%v
+STACKIT instance types:
+%v
`,
formatInstanceTypes(instancetypes.AWSSNPSupportedInstanceFamilies),
formatInstanceTypes(instancetypes.AWSSupportedInstanceFamilies),
@@ -45,6 +47,7 @@ GCP instance types:
formatInstanceTypes(instancetypes.AzureSNPInstanceTypes),
formatInstanceTypes(instancetypes.AzureTrustedLaunchInstanceTypes),
formatInstanceTypes(instancetypes.GCPInstanceTypes),
+ formatInstanceTypes(instancetypes.STACKITInstanceTypes),
)
}
diff --git a/cli/internal/cmd/iamcreate.go b/cli/internal/cmd/iamcreate.go
index 4067b33b0..b2e44f4a2 100644
--- a/cli/internal/cmd/iamcreate.go
+++ b/cli/internal/cmd/iamcreate.go
@@ -133,7 +133,7 @@ func (c *iamCreator) create(ctx context.Context) error {
var conf config.Config
if c.flags.updateConfig {
- c.log.Debug(fmt.Sprintf("Parsing config %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
+ c.log.Debug(fmt.Sprintf("Parsing config %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
if err := c.fileHandler.ReadYAML(constants.ConfigFilename, &conf); err != nil {
return fmt.Errorf("error reading the configuration file: %w", err)
}
@@ -161,7 +161,7 @@ func (c *iamCreator) create(ctx context.Context) error {
}
if c.flags.updateConfig {
- c.log.Debug(fmt.Sprintf("Writing IAM configuration to %s", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
+ c.log.Debug(fmt.Sprintf("Writing IAM configuration to %q", c.flags.pathPrefixer.PrefixPrintablePath(constants.ConfigFilename)))
c.providerCreator.writeOutputValuesToConfig(&conf, iamFile)
if err := c.fileHandler.WriteYAML(constants.ConfigFilename, conf, file.OptOverwrite); err != nil {
return err
diff --git a/cli/internal/cmd/init.go b/cli/internal/cmd/init.go
index 8075db901..a1e3e3632 100644
--- a/cli/internal/cmd/init.go
+++ b/cli/internal/cmd/init.go
@@ -82,7 +82,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
// Set the current context to the cluster we just created
cfg.CurrentContext = constellConfig.CurrentContext
- c.log.Debug(fmt.Sprintf("Set current context to %s", cfg.CurrentContext))
+ c.log.Debug(fmt.Sprintf("Set current context to %q", cfg.CurrentContext))
json, err := runtime.Encode(clientcodec.Codec, cfg)
if err != nil {
@@ -97,7 +97,7 @@ func (c *kubeconfigMerger) mergeConfigs(configPath string, fileHandler file.Hand
if err := fileHandler.Write(clientcmd.RecommendedHomeFile, mergedKubeconfig, file.OptOverwrite); err != nil {
return fmt.Errorf("writing merged kubeconfig to file: %w", err)
}
- c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %s", clientcmd.RecommendedHomeFile))
+ c.log.Debug(fmt.Sprintf("Merged kubeconfig into default config file: %q", clientcmd.RecommendedHomeFile))
return nil
}
diff --git a/cli/internal/cmd/init_test.go b/cli/internal/cmd/init_test.go
index f55b7e77c..8d6d2b1bb 100644
--- a/cli/internal/cmd/init_test.go
+++ b/cli/internal/cmd/init_test.go
@@ -279,7 +279,7 @@ type stubHelmApplier struct {
}
func (s stubHelmApplier) PrepareHelmCharts(
- _ helm.Options, _ *state.State, _ string, _ uri.MasterSecret, _ *config.OpenStackConfig,
+ _ helm.Options, _ *state.State, _ string, _ uri.MasterSecret,
) (helm.Applier, bool, error) {
return stubRunner{}, false, s.err
}
diff --git a/cli/internal/cmd/license_enterprise.go b/cli/internal/cmd/license_enterprise.go
index 79ae2bf7c..d4afe973e 100644
--- a/cli/internal/cmd/license_enterprise.go
+++ b/cli/internal/cmd/license_enterprise.go
@@ -22,18 +22,22 @@ import (
// with the license server. If no license file is present or if errors
// occur during the check, the user is informed and the community license
// is used. It is a no-op in the open source version of Constellation.
-func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provider) {
+func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provider, useMarketplaceImage bool) {
var licenseID string
a.log.Debug("Running license check")
readBytes, err := a.fileHandler.Read(constants.LicenseFilename)
- if errors.Is(err, fs.ErrNotExist) {
- cmd.Printf("Using community license.\n")
+ switch {
+ case useMarketplaceImage:
+ cmd.Println("Using marketplace image billing.")
+ licenseID = license.MarketplaceLicense
+ case errors.Is(err, fs.ErrNotExist):
+ cmd.Println("Using community license.")
licenseID = license.CommunityLicense
- } else if err != nil {
+ case err != nil:
cmd.Printf("Error: %v\nContinuing with community license.\n", err)
licenseID = license.CommunityLicense
- } else {
+ default:
cmd.Printf("Constellation license found!\n")
licenseID, err = license.FromBytes(readBytes)
if err != nil {
@@ -43,9 +47,11 @@ func (a *applyCmd) checkLicenseFile(cmd *cobra.Command, csp cloudprovider.Provid
}
quota, err := a.applier.CheckLicense(cmd.Context(), csp, !a.flags.skipPhases.contains(skipInitPhase), licenseID)
- if err != nil {
+ if err != nil && !useMarketplaceImage {
cmd.Printf("Unable to contact license server.\n")
cmd.Printf("Please keep your vCPU quota in mind.\n")
+ } else if licenseID == license.MarketplaceLicense {
+ // Do nothing. Billing is handled by the marketplace.
} else if licenseID == license.CommunityLicense {
cmd.Printf("For details, see https://docs.edgeless.systems/constellation/overview/license\n")
} else {
diff --git a/cli/internal/cmd/license_oss.go b/cli/internal/cmd/license_oss.go
index 8fba56114..fd14d35bc 100644
--- a/cli/internal/cmd/license_oss.go
+++ b/cli/internal/cmd/license_oss.go
@@ -17,4 +17,4 @@ import (
// with the license server. If no license file is present or if errors
// occur during the check, the user is informed and the community license
// is used. It is a no-op in the open source version of Constellation.
-func (a *applyCmd) checkLicenseFile(*cobra.Command, cloudprovider.Provider) {}
+func (a *applyCmd) checkLicenseFile(*cobra.Command, cloudprovider.Provider, bool) {}
diff --git a/cli/internal/cmd/maapatch.go b/cli/internal/cmd/maapatch.go
index a32e8729a..bb7ea381a 100644
--- a/cli/internal/cmd/maapatch.go
+++ b/cli/internal/cmd/maapatch.go
@@ -56,7 +56,7 @@ func runPatchMAA(cmd *cobra.Command, args []string) error {
}
func (c *maaPatchCmd) patchMAA(cmd *cobra.Command, attestationURL string) error {
- c.log.Debug(fmt.Sprintf("Using attestation URL %s", attestationURL))
+ c.log.Debug(fmt.Sprintf("Using attestation URL %q", attestationURL))
if err := c.patcher.Patch(cmd.Context(), attestationURL); err != nil {
return fmt.Errorf("patching MAA attestation policy: %w", err)
diff --git a/cli/internal/cmd/recover.go b/cli/internal/cmd/recover.go
index f3efc3e96..4541fd08d 100644
--- a/cli/internal/cmd/recover.go
+++ b/cli/internal/cmd/recover.go
@@ -84,7 +84,7 @@ func runRecover(cmd *cobra.Command, _ []string) error {
if err := r.flags.parse(cmd.Flags()); err != nil {
return err
}
- r.log.Debug(fmt.Sprintf("Using flags: %+v", r.flags))
+ r.log.Debug("Using flags", "debug", r.flags.debug, "endpoint", r.flags.endpoint, "force", r.flags.force)
return r.recover(cmd, fileHandler, 5*time.Second, &recoverDoer{log: r.log}, newDialer)
}
@@ -93,7 +93,7 @@ func (r *recoverCmd) recover(
doer recoverDoerInterface, newDialer func(validator atls.Validator) *dialer.Dialer,
) error {
var masterSecret uri.MasterSecret
- r.log.Debug(fmt.Sprintf("Loading master secret file from %s", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)))
+ r.log.Debug(fmt.Sprintf("Loading master secret file from %q", r.flags.pathPrefixer.PrefixPrintablePath(constants.MasterSecretFilename)))
if err := fileHandler.ReadJSON(constants.MasterSecretFilename, &masterSecret); err != nil {
return err
}
@@ -108,7 +108,7 @@ func (r *recoverCmd) recover(
return err
}
- r.log.Debug(fmt.Sprintf("Got provider %s", conf.GetProvider()))
+ r.log.Debug(fmt.Sprintf("Got provider %q", conf.GetProvider()))
if conf.GetProvider() == cloudprovider.Azure {
interval = 20 * time.Second // Azure LB takes a while to remove unhealthy instances
}
@@ -129,14 +129,14 @@ func (r *recoverCmd) recover(
conf.UpdateMAAURL(stateFile.Infrastructure.Azure.AttestationURL)
}
- r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
+ r.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(conf.GetAttestationConfig(), warnLogger{cmd: cmd, log: r.log})
if err != nil {
return fmt.Errorf("creating new validator: %w", err)
}
r.log.Debug("Created a new validator")
doer.setDialer(newDialer(validator), endpoint)
- r.log.Debug(fmt.Sprintf("Set dialer for endpoint %s", endpoint))
+ r.log.Debug(fmt.Sprintf("Set dialer for endpoint %q", endpoint))
doer.setURIs(masterSecret.EncodeToURI(), uri.NoStoreURI)
r.log.Debug("Set secrets")
if err := r.recoverCall(cmd.Context(), cmd.OutOrStdout(), interval, doer); err != nil {
@@ -166,7 +166,7 @@ func (r *recoverCmd) recoverCall(ctx context.Context, out io.Writer, interval ti
})
}
- r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", retry, err))
+ r.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", retry, err))
return retry
}
diff --git a/cli/internal/cmd/upgradeapply_test.go b/cli/internal/cmd/upgradeapply_test.go
index 8cf546c37..f396cc828 100644
--- a/cli/internal/cmd/upgradeapply_test.go
+++ b/cli/internal/cmd/upgradeapply_test.go
@@ -376,9 +376,9 @@ type mockApplier struct {
}
func (m *mockApplier) PrepareHelmCharts(
- helmOpts helm.Options, stateFile *state.State, str string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ helmOpts helm.Options, stateFile *state.State, str string, masterSecret uri.MasterSecret,
) (helm.Applier, bool, error) {
- args := m.Called(helmOpts, stateFile, helmOpts, str, masterSecret, openStackCfg)
+ args := m.Called(helmOpts, stateFile, helmOpts, str, masterSecret)
return args.Get(0).(helm.Applier), args.Bool(1), args.Error(2)
}
diff --git a/cli/internal/cmd/upgradecheck.go b/cli/internal/cmd/upgradecheck.go
index 74ec31e08..a782ebef2 100644
--- a/cli/internal/cmd/upgradecheck.go
+++ b/cli/internal/cmd/upgradecheck.go
@@ -187,7 +187,7 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
// get current image version of the cluster
csp := conf.GetProvider()
attestationVariant := conf.GetAttestationConfig().GetVariant()
- u.log.Debug(fmt.Sprintf("Using provider %s with attestation variant %s", csp.String(), attestationVariant.String()))
+ u.log.Debug(fmt.Sprintf("Using provider %q with attestation variant %q", csp.String(), attestationVariant.String()))
current, err := u.collect.currentVersions(cmd.Context())
if err != nil {
@@ -198,12 +198,12 @@ func (u *upgradeCheckCmd) upgradeCheck(cmd *cobra.Command, fetcher attestationco
if err != nil {
return err
}
- u.log.Debug(fmt.Sprintf("Current cli version: %s", current.cli))
- u.log.Debug(fmt.Sprintf("Supported cli version(s): %s", supported.cli))
- u.log.Debug(fmt.Sprintf("Current service version: %s", current.service))
- u.log.Debug(fmt.Sprintf("Supported service version: %s", supported.service))
- u.log.Debug(fmt.Sprintf("Current k8s version: %s", current.k8s))
- u.log.Debug(fmt.Sprintf("Supported k8s version(s): %s", supported.k8s))
+ u.log.Debug(fmt.Sprintf("Current cli version: %q", current.cli))
+ u.log.Debug(fmt.Sprintf("Supported cli version(s): %q", supported.cli))
+ u.log.Debug(fmt.Sprintf("Current service version: %q", current.service))
+ u.log.Debug(fmt.Sprintf("Supported service version: %q", supported.service))
+ u.log.Debug(fmt.Sprintf("Current k8s version: %q", current.k8s))
+ u.log.Debug(fmt.Sprintf("Supported k8s version(s): %q", supported.k8s))
// Filter versions to only include upgrades
newServices := supported.service
@@ -343,7 +343,7 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
// get expected measurements for each image
upgrades := make(map[string]measurements.M)
for _, version := range versions {
- v.log.Debug(fmt.Sprintf("Fetching measurements for image: %s", version.Version()))
+ v.log.Debug(fmt.Sprintf("Fetching measurements for image: %q", version.Version()))
shortPath := version.ShortPath()
publicKey, err := keyselect.CosignPublicKeyForVersion(version)
@@ -363,8 +363,8 @@ func (v *versionCollector) newMeasurements(ctx context.Context, csp cloudprovide
continue
}
upgrades[shortPath] = measurements
+ v.log.Debug("Compatible image measurement found", "image", shortPath, "measurements", measurements.String())
}
- v.log.Debug(fmt.Sprintf("Compatible image measurements are %v", upgrades))
return upgrades, nil
}
@@ -452,9 +452,9 @@ func (v *versionCollector) newImages(ctx context.Context, currentImageVersion co
if err != nil {
return nil, fmt.Errorf("calculating next image minor version: %w", err)
}
- v.log.Debug(fmt.Sprintf("Current image minor version is %s", currentImageMinorVer))
- v.log.Debug(fmt.Sprintf("Current CLI minor version is %s", currentCLIMinorVer))
- v.log.Debug(fmt.Sprintf("Next image minor version is %s", nextImageMinorVer))
+ v.log.Debug(fmt.Sprintf("Current image minor version is %q", currentImageMinorVer))
+ v.log.Debug(fmt.Sprintf("Current CLI minor version is %q", currentCLIMinorVer))
+ v.log.Debug(fmt.Sprintf("Next image minor version is %q", nextImageMinorVer))
allowedMinorVersions := []string{currentImageMinorVer, nextImageMinorVer}
switch cliImageCompare := semver.Compare(currentCLIMinorVer, currentImageMinorVer); {
@@ -493,7 +493,7 @@ func (v *versionCollector) newerVersions(ctx context.Context, allowedVersions []
patchList, err := v.verListFetcher.FetchVersionList(ctx, patchList)
var notFound *fetcher.NotFoundError
if errors.As(err, ¬Found) {
- v.log.Debug(fmt.Sprintf("Skipping version: %s", err))
+ v.log.Debug(fmt.Sprintf("Skipping version: %q", err))
continue
}
if err != nil {
@@ -603,7 +603,7 @@ func getCompatibleImageMeasurements(ctx context.Context, writer io.Writer, clien
}
var fetchedMeasurements measurements.M
- log.Debug(fmt.Sprintf("Fetching for measurement url: %s", measurementsURL))
+ log.Debug(fmt.Sprintf("Fetching for measurement url: %q", measurementsURL))
hash, err := fetchedMeasurements.FetchAndVerify(
ctx, client, cosign,
@@ -657,7 +657,7 @@ func (v *versionCollector) newCLIVersions(ctx context.Context) ([]consemver.Semv
return nil, fmt.Errorf("parsing version %s: %w", version, err)
}
if err := target.IsUpgradeTo(v.cliVersion); err != nil {
- v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %s", version, err))
+ v.log.Debug(fmt.Sprintf("Skipping incompatible minor version %q: %q", version, err))
continue
}
list := versionsapi.List{
@@ -691,7 +691,7 @@ func (v *versionCollector) filterCompatibleCLIVersions(ctx context.Context, cliP
var compatibleVersions []consemver.Semver
for _, version := range cliPatchVersions {
if err := version.IsUpgradeTo(v.cliVersion); err != nil {
- v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %s", version, err))
+ v.log.Debug(fmt.Sprintf("Skipping incompatible patch version %q: %q", version, err))
continue
}
req := versionsapi.CLIInfo{
diff --git a/cli/internal/cmd/verify.go b/cli/internal/cmd/verify.go
index 049f02293..f80d1128d 100644
--- a/cli/internal/cmd/verify.go
+++ b/cli/internal/cmd/verify.go
@@ -128,7 +128,7 @@ func runVerify(cmd *cobra.Command, _ []string) error {
if err := v.flags.parse(cmd.Flags()); err != nil {
return err
}
- v.log.Debug(fmt.Sprintf("Using flags: %+v", v.flags))
+ v.log.Debug("Using flags", "clusterID", v.flags.clusterID, "endpoint", v.flags.endpoint, "ownerID", v.flags.ownerID)
fetcher := attestationconfigapi.NewFetcher()
return v.verify(cmd, verifyClient, formatterFactory, fetcher)
}
@@ -175,7 +175,7 @@ func (c *verifyCmd) verify(cmd *cobra.Command, verifyClient verifyClient, factor
return fmt.Errorf("updating expected PCRs: %w", err)
}
- c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %s", conf.GetAttestationConfig().GetVariant()))
+ c.log.Debug(fmt.Sprintf("Creating aTLS Validator for %q", conf.GetAttestationConfig().GetVariant()))
validator, err := choose.Validator(attConfig, warnLogger{cmd: cmd, log: c.log})
if err != nil {
return fmt.Errorf("creating aTLS validator: %w", err)
diff --git a/cli/internal/terraform/variables.go b/cli/internal/terraform/variables.go
index f48ae0d88..a83818260 100644
--- a/cli/internal/terraform/variables.go
+++ b/cli/internal/terraform/variables.go
@@ -278,20 +278,16 @@ type OpenStackClusterVariables struct {
Name string `hcl:"name" cty:"name"`
// NodeGroups is a map of node groups to create.
NodeGroups map[string]OpenStackNodeGroup `hcl:"node_groups" cty:"node_groups"`
- // Cloud is the (optional) name of the OpenStack cloud to use when reading the "clouds.yaml" configuration file. If empty, environment variables are used.
+ // Cloud is the name of the OpenStack cloud to use when reading the "clouds.yaml" configuration file. If empty, environment variables are used.
Cloud *string `hcl:"cloud" cty:"cloud"`
+ // OpenStackCloudsYAMLPath is the path to the OpenStack clouds.yaml file
+ OpenStackCloudsYAMLPath string `hcl:"openstack_clouds_yaml_path" cty:"openstack_clouds_yaml_path"`
// (STACKIT only) STACKITProjectID is the ID of the STACKIT project to use.
STACKITProjectID string `hcl:"stackit_project_id" cty:"stackit_project_id"`
// FloatingIPPoolID is the ID of the OpenStack floating IP pool to use for public IPs.
FloatingIPPoolID string `hcl:"floating_ip_pool_id" cty:"floating_ip_pool_id"`
// ImageID is the ID of the OpenStack image to use.
ImageID string `hcl:"image_id" cty:"image_id"`
- // OpenstackUserDomainName is the OpenStack user domain name to use.
- OpenstackUserDomainName string `hcl:"openstack_user_domain_name" cty:"openstack_user_domain_name"`
- // OpenstackUsername is the OpenStack user name to use.
- OpenstackUsername string `hcl:"openstack_username" cty:"openstack_username"`
- // OpenstackPassword is the OpenStack password to use.
- OpenstackPassword string `hcl:"openstack_password" cty:"openstack_password"`
// Debug is true if debug mode is enabled.
Debug bool `hcl:"debug" cty:"debug"`
// CustomEndpoint is the (optional) custom dns hostname for the kubernetes api server.
diff --git a/cli/internal/terraform/variables_test.go b/cli/internal/terraform/variables_test.go
index 56940e976..df27ddb59 100644
--- a/cli/internal/terraform/variables_test.go
+++ b/cli/internal/terraform/variables_test.go
@@ -254,11 +254,9 @@ func TestOpenStackClusterVariables(t *testing.T) {
vars := OpenStackClusterVariables{
Name: "cluster-name",
Cloud: toPtr("my-cloud"),
+ OpenStackCloudsYAMLPath: "~/.config/openstack/clouds.yaml",
FloatingIPPoolID: "fip-pool-0123456789abcdef",
ImageID: "8e10b92d-8f7a-458c-91c6-59b42f82ef81",
- OpenstackUserDomainName: "my-user-domain",
- OpenstackUsername: "my-username",
- OpenstackPassword: "my-password",
Debug: true,
STACKITProjectID: "my-stackit-project-id",
NodeGroups: map[string]OpenStackNodeGroup{
@@ -287,12 +285,10 @@ node_groups = {
}
}
cloud = "my-cloud"
+openstack_clouds_yaml_path = "~/.config/openstack/clouds.yaml"
stackit_project_id = "my-stackit-project-id"
floating_ip_pool_id = "fip-pool-0123456789abcdef"
image_id = "8e10b92d-8f7a-458c-91c6-59b42f82ef81"
-openstack_user_domain_name = "my-user-domain"
-openstack_username = "my-username"
-openstack_password = "my-password"
debug = true
custom_endpoint = "example.com"
internal_load_balancer = false
diff --git a/debugd/service/debugd.pb.go b/debugd/service/debugd.pb.go
index cf7637ffd..fb95a1221 100644
--- a/debugd/service/debugd.pb.go
+++ b/debugd/service/debugd.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: debugd/service/debugd.proto
diff --git a/disk-mapper/cmd/main.go b/disk-mapper/cmd/main.go
index 56b1c1812..f20bf9cfa 100644
--- a/disk-mapper/cmd/main.go
+++ b/disk-mapper/cmd/main.go
@@ -12,9 +12,11 @@ import (
"fmt"
"io"
"log/slog"
+ "log/syslog"
"net"
"os"
"path/filepath"
+ "time"
"github.com/edgelesssys/constellation/v2/disk-mapper/internal/diskencryption"
"github.com/edgelesssys/constellation/v2/disk-mapper/internal/recoveryserver"
@@ -48,6 +50,21 @@ const (
)
func main() {
+ runErr := run()
+ if runErr == nil {
+ return
+ }
+ syslogWriter, err := syslog.New(syslog.LOG_EMERG|syslog.LOG_KERN, "disk-mapper")
+ if err != nil {
+ os.Exit(1)
+ }
+ _ = syslogWriter.Err(runErr.Error())
+ _ = syslogWriter.Emerg("disk-mapper has failed. In most cases, this is due to a misconfiguration or transient error with the infrastructure.")
+ time.Sleep(time.Minute) // sleep to allow the message to be written to syslog and seen by the user
+ os.Exit(1)
+}
+
+func run() error {
csp := flag.String("csp", "", "Cloud Service Provider the image is running on")
verbosity := flag.Int("v", 0, logger.CmdLineVerbosityDescription)
@@ -60,12 +77,12 @@ func main() {
attestVariant, err := variant.FromString(os.Getenv(constants.AttestationVariant))
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to parse attestation variant")
- os.Exit(1)
+ return err
}
issuer, err := choose.Issuer(attestVariant, log)
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to select issuer")
- os.Exit(1)
+ return err
}
// set up metadata API
@@ -78,36 +95,36 @@ func main() {
diskPath, err = filepath.EvalSymlinks(awsStateDiskPath)
if err != nil {
log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path")
- os.Exit(1)
+ return err
}
metadataClient, err = awscloud.New(context.Background())
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to set up AWS metadata client")
- os.Exit(1)
+ return err
}
case cloudprovider.Azure:
diskPath, err = filepath.EvalSymlinks(azureStateDiskPath)
if err != nil {
log.With(slog.Any("error", err)).Error("Unable to resolve Azure state disk path")
- os.Exit(1)
+ return err
}
metadataClient, err = azurecloud.New(context.Background())
if err != nil {
log.With(slog.Any("error", err)).Error("Failed to set up Azure metadata client")
- os.Exit(1)
+ return err
}
case cloudprovider.GCP:
diskPath, err = filepath.EvalSymlinks(gcpStateDiskPath)
if err != nil {
log.With(slog.Any("error", err)).Error("Unable to resolve GCP state disk path")
- os.Exit(1)
+ return err
}
gcpMeta, err := gcpcloud.New(context.Background())
if err != nil {
log.With(slog.Any("error", err)).Error(("Failed to create GCP metadata client"))
- os.Exit(1)
+ return err
}
defer gcpMeta.Close()
metadataClient = gcpMeta
@@ -117,7 +134,7 @@ func main() {
metadataClient, err = openstack.New(context.Background())
if err != nil {
log.With(slog.Any("error", err)).Error(("Failed to create OpenStack metadata client"))
- os.Exit(1)
+ return err
}
case cloudprovider.QEMU:
@@ -126,14 +143,14 @@ func main() {
default:
log.Error(fmt.Sprintf("CSP %s is not supported by Constellation", *csp))
- os.Exit(1)
+ return fmt.Errorf("CSP %s is not supported by Constellation", *csp)
}
// initialize device mapper
mapper, free, err := diskencryption.New(diskPath, log)
if err != nil {
log.With(slog.Any("error", err)).Error(("Failed to initialize device mapper"))
- os.Exit(1)
+ return err
}
defer free()
@@ -156,7 +173,7 @@ func main() {
if err := setupManger.LogDevices(); err != nil {
log.With(slog.Any("error", err)).Error(("Failed to log devices"))
- os.Exit(1)
+ return err
}
// prepare the state disk
@@ -166,7 +183,7 @@ func main() {
self, err = metadataClient.Self(context.Background())
if err != nil {
log.With(slog.Any("error", err)).Error(("Failed to get self metadata"))
- os.Exit(1)
+ return err
}
rejoinClient := rejoinclient.New(
dialer.New(issuer, nil, &net.Dialer{}),
@@ -189,6 +206,7 @@ func main() {
}
if err != nil {
log.With(slog.Any("error", err)).Error(("Failed to prepare state disk"))
- os.Exit(1)
+ return err
}
+ return nil
}
diff --git a/disk-mapper/recoverproto/recover.pb.go b/disk-mapper/recoverproto/recover.pb.go
index fa62e6d69..2a22120de 100644
--- a/disk-mapper/recoverproto/recover.pb.go
+++ b/disk-mapper/recoverproto/recover.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: disk-mapper/recoverproto/recover.proto
diff --git a/docs/docs/architecture/attestation.md b/docs/docs/architecture/attestation.md
index 04b85d8ad..572a8511f 100644
--- a/docs/docs/architecture/attestation.md
+++ b/docs/docs/architecture/attestation.md
@@ -217,6 +217,38 @@ The latter means that the value can be generated offline and compared to the one
| 15 | ClusterID | Constellation Bootstrapper | Yes |
| 16–23 | Unused | - | - |
+
+
+
+Constellation uses a hypervisor-based vTPM for runtime measurements.
+
+The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification.
+The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot).
+
+The following table lists all PCR values of the vTPM and the measured components.
+It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable.
+The latter means that the value can be generated offline and compared to the one in the vTPM.
+
+| PCR | Components | Measured by | Reproducible and verifiable |
+| ----------- | ---------------------------------------------------------------- | -------------------------------------- | --------------------------- |
+| 0 | Firmware | STACKIT | No |
+| 1 | Firmware | STACKIT | No |
+| 2 | Firmware | STACKIT | No |
+| 3 | Firmware | STACKIT | No |
+| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | STACKIT, Constellation Bootloader | Yes |
+| 5 | Firmware | STACKIT | No |
+| 6 | Firmware | STACKIT | No |
+| 7 | Secure Boot Policy | STACKIT, Constellation Bootloader | No |
+| 8 | - | - | - |
+| 9 | initramfs, Kernel command line | Linux Kernel | Yes |
+| 10 | User space | Linux IMA | No[^1] |
+| 11 | Unified Kernel Image components | Constellation Bootloader | Yes |
+| 12 | Reserved | (User space, Constellation Bootloader) | Yes |
+| 13 | Reserved | (Constellation Bootloader) | Yes |
+| 14 | Secure Boot State | Constellation Bootloader | No |
+| 15 | ClusterID | Constellation Bootstrapper | Yes |
+| 16–23 | Unused | - | - |
+
@@ -251,13 +283,15 @@ You may customize certain parameters for verification of the attestation stateme
+On GCP, AMD SEV-ES is used to provide runtime encryption to the VMs.
+The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements).
There is no additional configuration available for GCP.
On AWS, AMD SEV-SNP is used to provide runtime encryption to the VMs.
-An SEV-SNP attestation report is used to establish trust in the VM and it's vTPM.
+An SEV-SNP attestation report is used to establish trust in the VM.
You may customize certain parameters for verification of the attestation statement using the Constellation config file.
* TCB versions
@@ -275,6 +309,13 @@ You may customize certain parameters for verification of the attestation stateme
This is the intermediate certificate for verifying the SEV-SNP report's signature.
If it's not specified, the CLI fetches it from the AMD key distribution server.
+
+
+
+On STACKIT, AMD SEV-ES is used to provide runtime encryption to the VMs.
+The hypervisor-based vTPM is used to establish trust in the VM via [runtime measurements](#runtime-measurements).
+There is no additional configuration available for STACKIT.
+
diff --git a/docs/docs/getting-started/install.md b/docs/docs/getting-started/install.md
index 8d41e3c8e..9d35c912b 100644
--- a/docs/docs/getting-started/install.md
+++ b/docs/docs/getting-started/install.md
@@ -6,7 +6,7 @@ Constellation runs entirely in your cloud environment and can be controlled via
Make sure the following requirements are met:
-* Your machine is running Linux or macOS
+* Your machine is running Linux, macOS, or Windows
* You have admin rights on your machine
* [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed
* Your CSP is Microsoft Azure, Google Cloud Platform (GCP), Amazon Web Services (AWS), or STACKIT
@@ -92,6 +92,29 @@ curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/c
sudo install constellation-darwin-amd64 /usr/local/bin/constellation
```
+
+
+
+
+1. Download the CLI:
+
+```bash
+Invoke-WebRequest -OutFile ./constellation.exe -Uri 'https://github.com/edgelesssys/constellation/releases/latest/download/constellation-windows-amd64.exe'
+```
+
+2. [Verify the signature](../workflows/verify-cli.md) (optional)
+
+3. Install the CLI under `C:\Program Files\Constellation\bin\constellation.exe`
+
+4. Add the CLI to your PATH:
+
+ 1. Open `Advanced system settings` by searching for the App in the Windows search
+ 2. Go to the `Advanced` tab
+ 3. Click `Environment Variables…`
+ 4. Click the variable called `Path` and click `Edit…`
+ 5. Click `New`
+ 6. Enter the path to the folder containing the binary you want on your PATH: `C:\Program Files\Constellation\bin`
+
@@ -374,7 +397,7 @@ Options and first steps are described in the [AWS CLI documentation](https://doc
You need to authenticate with the infrastructure API (OpenStack) and create a service account (STACKIT API).
1. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/step-1-generating-of-user-access-token-11763726.html) for obtaining a User Access Token (UAT) to use the infrastructure API
-2. Create a configuration file under `~/.config/openstack/clouds.yaml` with the credentials from the User Access Token
+2. Create a configuration file under `~/.config/openstack/clouds.yaml` (`%AppData%\openstack\clouds.yaml` on Windows) with the credentials from the User Access Token
```yaml
clouds:
stackit:
@@ -391,7 +414,7 @@ You need to authenticate with the infrastructure API (OpenStack) and create a se
```
3. [Follow the STACKIT documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html) for creating a service account and an access token
4. Assign the `editor` role to the service account by [following the documentation](https://docs.stackit.cloud/stackit/en/getting-started-in-service-accounts-134415831.html)
-5. Create a configuration file under `~/.stackit/credentials.json`
+5. Create a configuration file under `~/.stackit/credentials.json` (`%USERPROFILE%\.stackit\credentials.json` on Windows)
```json
{"STACKIT_SERVICE_ACCOUNT_TOKEN":"REPLACE_WITH_TOKEN"}
```
diff --git a/docs/docs/overview/clouds.md b/docs/docs/overview/clouds.md
index 8cc42a990..b2de81e4b 100644
--- a/docs/docs/overview/clouds.md
+++ b/docs/docs/overview/clouds.md
@@ -14,13 +14,13 @@ For Constellation, the ideal environment provides the following:
The following table summarizes the state of features for different infrastructures as of June 2023.
-| **Feature** | **Azure** | **GCP** | **AWS** | **OpenStack (Yoga)** |
-|-----------------------------------|-----------|---------|---------|----------------------|
-| **1. Custom images** | Yes | Yes | Yes | Yes |
-| **2. SEV-SNP or TDX** | Yes | Yes | Yes | Depends on kernel/HV |
-| **3. Raw guest attestation** | Yes | Yes | Yes | Depends on kernel/HV |
-| **4. Reviewable firmware** | No | No | Yes | Depends on kernel/HV |
-| **5. Confidential measured boot** | Yes | No | No | Depends on kernel/HV |
+| **Feature** | **Azure** | **GCP** | **AWS** | **STACKIT** | **OpenStack (Yoga)** |
+|-----------------------------------|-----------|---------|---------|--------------|----------------------|
+| **1. Custom images** | Yes | Yes | Yes | Yes | Yes |
+| **2. SEV-SNP or TDX** | Yes | Yes | Yes | No | Depends on kernel/HV |
+| **3. Raw guest attestation** | Yes | Yes | Yes | No | Depends on kernel/HV |
+| **4. Reviewable firmware** | No | No | Yes | No | Depends on kernel/HV |
+| **5. Confidential measured boot** | Yes | No | No | No | Depends on kernel/HV |
## Microsoft Azure
@@ -53,6 +53,10 @@ However, regarding (5), attestation is partially based on the [NitroTPM](https:/
Hence, the hypervisor is currently part of Constellation's TCB.
Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built.
+## STACKIT
+
+[STACKIT Compute Engine](https://www.stackit.de/en/product/stackit-compute-engine/) supports AMD SEV-ES. A vTPM managed by STACKIT's hypervisor is used for measured boot. Hence, the hypervisor is currently part of Constellation's TCB.
+
## OpenStack
OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation.
diff --git a/docs/docs/workflows/config.md b/docs/docs/workflows/config.md
index 872504834..7e8933466 100644
--- a/docs/docs/workflows/config.md
+++ b/docs/docs/workflows/config.md
@@ -77,17 +77,17 @@ The Constellation CLI can also print the supported instance types with: `constel
-By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 32 GB RAM) to create your cluster.
+By default, Constellation uses `m1a.4cd` VMs (4 vCPUs, 30 GB RAM) to create your cluster.
Optionally, you can switch to a different VM type by modifying `instanceType` in the configuration file.
The following instance types are known to be supported:
| name | vCPUs | GB RAM |
|----------|-------|--------|
-| m1a.4cd | 4 | 32 |
-| m1a.8cd | 8 | 64 |
+| m1a.4cd | 4 | 30 |
+| m1a.8cd | 8 | 60 |
| m1a.16cd | 16 | 120 |
-| m1a.30cd | 30 | 238 |
+| m1a.30cd | 30 | 230 |
You can choose any of the SEV-enabled instance types. You can find a list of all supported instance types in the [STACKIT documentation](https://docs.stackit.cloud/stackit/en/virtual-machine-flavors-75137231.html).
@@ -135,7 +135,7 @@ This configuration creates an additional node group `high_cpu` with a larger ins
You can use the field `zone` to specify what availability zone nodes of the group are placed in.
On Azure, this field is empty by default and nodes are automatically spread across availability zones.
-STACKIT currently only offers SEV-enabled CPUs in the `eu01-1` zone.
+STACKIT currently offers SEV-enabled CPUs in the `eu01-1`, `eu01-2`, and `eu01-3` zones.
Consult the documentation of your cloud provider for more information:
* [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/)
diff --git a/docs/docs/workflows/lb.md b/docs/docs/workflows/lb.md
index 11e403237..868e61076 100644
--- a/docs/docs/workflows/lb.md
+++ b/docs/docs/workflows/lb.md
@@ -4,12 +4,25 @@ Constellation integrates the native load balancers of each CSP. Therefore, to ex
## Internet-facing LB service on AWS
-To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancing Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS.
+To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancer Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS.
-Since recent versions, the controller deploy an internal LB by default requiring to set an annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/nlb/).
+Since recent versions, the controller deploys an internal LB by default, requiring the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to be set for an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/service/nlb/).
For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html).
:::caution
Before terminating the cluster, all LB backed services should be deleted, so that the controller can cleanup the related resources.
:::
+
+## Ingress on AWS
+
+The AWS Load Balancer Controller also provisions `Ingress` resources of class `alb`.
+AWS Application Load Balancers (ALBs) can be configured with a [`target-type`](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.7/guide/ingress/annotations/#target-type).
+The target type `ip` requires using the EKS container network solution, which makes it incompatible with Constellation.
+If a service can be exposed on a `NodePort`, the target type `instance` can be used.
+
+See [Application load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for more information.
+
+:::caution
+Ingress handlers backed by AWS ALBs reside outside the Constellation cluster, so they shouldn't be handling sensitive traffic!
+:::
diff --git a/docs/docs/workflows/recovery.md b/docs/docs/workflows/recovery.md
index 9396bf8f2..9bbb32652 100644
--- a/docs/docs/workflows/recovery.md
+++ b/docs/docs/workflows/recovery.md
@@ -118,6 +118,37 @@ If this fails due to an unhealthy control plane, you will see log messages simil
This means that you have to recover the node manually.
+
+
+
+First, open the STACKIT portal to view all servers in your project. Select individual control plane nodes `--control-plane--` and check that enough members are in a *Running* state.
+
+Second, check the boot logs of these servers. Click on a server name and select **Overview**. Find the **Machine Setup** section and click on **Web console** > **Open console**.
+
+In the serial console output, search for `Waiting for decryption key`.
+Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk):
+
+```json
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"}
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"}
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"}
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"}
+```
+
+The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key.
+If this fails due to an unhealthy control plane, you will see log messages similar to the following:
+
+```json
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]}
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"}
+{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"}
+{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"}
+{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"}
+{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"}
+```
+
+This means that you have to recover the node manually.
+
diff --git a/docs/docs/workflows/terraform-provider.md b/docs/docs/workflows/terraform-provider.md
index 7de44a530..78e63f326 100644
--- a/docs/docs/workflows/terraform-provider.md
+++ b/docs/docs/workflows/terraform-provider.md
@@ -78,6 +78,17 @@ This example shows how to set up a Constellation cluster with the reference IAM
Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios.
+
+ Initialize the providers and apply the configuration.
+
+ ```bash
+ terraform init
+ terraform apply
+ ```
+
+ Optionally, you can prefix the `terraform apply` command with `TF_LOG=INFO` to collect [Terraform logs](https://developer.hashicorp.com/terraform/internals/debugging) while applying the configuration. This may provide helpful output in debugging scenarios.
+
+
4. Connect to the cluster.
diff --git a/docs/docs/workflows/verify-cli.md b/docs/docs/workflows/verify-cli.md
index 1280c51b0..78341f314 100644
--- a/docs/docs/workflows/verify-cli.md
+++ b/docs/docs/workflows/verify-cli.md
@@ -33,6 +33,10 @@ You don't need to verify the Constellation node images. This is done automatical
## Verify the signature
+:::info
+This guide assumes Linux on an amd64 processor. The exact steps for other platforms differ slightly.
+:::
+
First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example:
```shell-session
diff --git a/docs/styles/config/vocabularies/edgeless/accept.txt b/docs/styles/config/vocabularies/edgeless/accept.txt
index 6220f0553..26fa0d0c9 100644
--- a/docs/styles/config/vocabularies/edgeless/accept.txt
+++ b/docs/styles/config/vocabularies/edgeless/accept.txt
@@ -63,6 +63,7 @@ rollout
SBOM
sigstore
SSD
+STACKIT
superset
Syft
systemd
diff --git a/e2e/malicious-join/malicious-join.go b/e2e/malicious-join/malicious-join.go
index 981035880..2ef649771 100644
--- a/e2e/malicious-join/malicious-join.go
+++ b/e2e/malicious-join/malicious-join.go
@@ -155,13 +155,13 @@ type maliciousJoiner struct {
// join issues a join request to the join service endpoint.
func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketResponse, error) {
- j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %s", j.endpoint))
+ j.logger.Debug(fmt.Sprintf("Dialing join service endpoint %q", j.endpoint))
conn, err := j.dialer.Dial(ctx, j.endpoint)
if err != nil {
return nil, fmt.Errorf("dialing join service endpoint: %w", err)
}
defer conn.Close()
- j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %s", j.endpoint))
+ j.logger.Debug(fmt.Sprintf("Successfully dialed join service endpoint %q", j.endpoint))
protoClient := joinproto.NewAPIClient(conn)
@@ -172,7 +172,7 @@ func (j *maliciousJoiner) join(ctx context.Context) (*joinproto.IssueJoinTicketR
IsControlPlane: false,
}
res, err := protoClient.IssueJoinTicket(ctx, req)
- j.logger.Debug(fmt.Sprintf("Got join ticket response: %+v", res))
+ j.logger.Debug("Got join ticket response", "apiServerEndpoint", res.ApiServerEndpoint, "kubernetesVersion", res.KubernetesVersion)
if err != nil {
return nil, fmt.Errorf("issuing join ticket: %w", err)
}
diff --git a/go.mod b/go.mod
index 5bb697ee2..a40342a06 100644
--- a/go.mod
+++ b/go.mod
@@ -71,7 +71,7 @@ require (
github.com/bazelbuild/buildtools v0.0.0-20230317132445-9c3c1fc0106e
github.com/bazelbuild/rules_go v0.42.0
github.com/coreos/go-systemd/v22 v22.5.0
- github.com/docker/docker v25.0.3+incompatible
+ github.com/docker/docker v25.0.5+incompatible
github.com/edgelesssys/go-azguestattestation v0.0.0-20230707101700-a683be600fcf
github.com/edgelesssys/go-tdx-qpl v0.0.0-20240123150912-dcad3c41ec5f
github.com/foxboron/go-uefi v0.0.0-20240128152106-48be911532c2
@@ -126,15 +126,15 @@ require (
go.etcd.io/etcd/client/pkg/v3 v3.5.12
go.etcd.io/etcd/client/v3 v3.5.12
go.uber.org/goleak v1.3.0
- golang.org/x/crypto v0.19.0
+ golang.org/x/crypto v0.21.0
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a
golang.org/x/mod v0.15.0
- golang.org/x/sys v0.17.0
+ golang.org/x/sys v0.18.0
golang.org/x/text v0.14.0
golang.org/x/tools v0.18.0
google.golang.org/api v0.165.0
google.golang.org/grpc v1.61.1
- google.golang.org/protobuf v1.32.0
+ google.golang.org/protobuf v1.33.0
gopkg.in/yaml.v3 v3.0.1
helm.sh/helm v2.17.0+incompatible
helm.sh/helm/v3 v3.14.2
@@ -300,7 +300,7 @@ require (
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
- github.com/mitchellh/go-homedir v1.1.0 // indirect
+ github.com/mitchellh/go-homedir v1.1.0
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -360,10 +360,10 @@ require (
go.starlark.net v0.0.0-20240123142251-f86470692795 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
- golang.org/x/net v0.21.0 // indirect
+ golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/sync v0.6.0 // indirect
- golang.org/x/term v0.17.0 // indirect
+ golang.org/x/term v0.18.0 // indirect
golang.org/x/time v0.5.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
diff --git a/go.sum b/go.sum
index d868f4ccd..719d46ff0 100644
--- a/go.sum
+++ b/go.sum
@@ -240,8 +240,8 @@ github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6
github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
-github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE=
+github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
@@ -865,8 +865,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE=
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
@@ -895,8 +895,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
@@ -935,15 +935,15 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
+golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
-golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -1008,8 +1008,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/go.work b/go.work
index 22d5025da..260ad61e4 100644
--- a/go.work
+++ b/go.work
@@ -1,6 +1,6 @@
-go 1.22.0
+go 1.22.3
-toolchain go1.22.0
+toolchain go1.22.3
use (
.
diff --git a/hack/bazel-deps-mirror/check.go b/hack/bazel-deps-mirror/check.go
index 64bd68b73..465e46f7f 100644
--- a/hack/bazel-deps-mirror/check.go
+++ b/hack/bazel-deps-mirror/check.go
@@ -40,7 +40,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "mirror", flags.mirror, "mirrorUnauthenticated", flags.mirrorUnauthenticated)
filesHelper, err := bazelfiles.New()
if err != nil {
@@ -89,7 +89,7 @@ func runCheck(cmd *cobra.Command, _ []string) error {
}
func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCheck mirrorChecker, bazelFile bazelfiles.BazelFile, log *slog.Logger) (issByFile issues.ByFile, err error) {
- log.Debug(fmt.Sprintf("Checking file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("Checking file: %q", bazelFile.RelPath))
issByFile = issues.NewByFile()
buildfile, err := fileHelper.LoadFile(bazelFile)
if err != nil {
@@ -97,12 +97,12 @@ func checkBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorCh
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
- log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
return issByFile, nil
}
- log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
for _, rule := range found {
- log.Debug(fmt.Sprintf("Checking rule: %s", rule.Name()))
+ log.Debug(fmt.Sprintf("Checking rule: %q", rule.Name()))
// check if the rule is a valid pinned dependency rule (has all required attributes)
if issues := rules.ValidatePinned(rule); len(issues) > 0 {
issByFile.Add(rule.Name(), issues...)
diff --git a/hack/bazel-deps-mirror/fix.go b/hack/bazel-deps-mirror/fix.go
index 9a327ee27..dd231cd06 100644
--- a/hack/bazel-deps-mirror/fix.go
+++ b/hack/bazel-deps-mirror/fix.go
@@ -40,7 +40,7 @@ func runFix(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun)
fileHelper, err := bazelfiles.New()
if err != nil {
@@ -96,10 +96,10 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
- log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
return iss, nil
}
- log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
for _, rule := range found {
changedRule, ruleIssues := fixRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@@ -113,7 +113,7 @@ func fixBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirrorUplo
return iss, nil
}
if !changed {
- log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath))
return iss, nil
}
if dryRun {
@@ -142,12 +142,12 @@ func learnHashForRule(ctx context.Context, mirrorUpload mirrorUploader, rule *bu
return err
}
rules.SetHash(rule, learnedHash)
- log.Debug(fmt.Sprintf("Learned hash for rule %s: %s", rule.Name(), learnedHash))
+ log.Debug(fmt.Sprintf("Learned hash for rule %q: %q", rule.Name(), learnedHash))
return nil
}
func fixRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
- log.Debug(fmt.Sprintf("Fixing rule: %s", rule.Name()))
+ log.Debug(fmt.Sprintf("Fixing rule: %q", rule.Name()))
// try to learn the hash
if hash, err := rules.GetHash(rule); err != nil || hash == "" {
diff --git a/hack/bazel-deps-mirror/internal/mirror/mirror.go b/hack/bazel-deps-mirror/internal/mirror/mirror.go
index 1593cc298..a9919adcc 100644
--- a/hack/bazel-deps-mirror/internal/mirror/mirror.go
+++ b/hack/bazel-deps-mirror/internal/mirror/mirror.go
@@ -95,10 +95,10 @@ func (m *Maintainer) Mirror(ctx context.Context, hash string, urls []string) err
}
for _, url := range urls {
- m.log.Debug(fmt.Sprintf("Mirroring file with hash %v from %q", hash, url))
+ m.log.Debug(fmt.Sprintf("Mirroring file with hash %q from %q", hash, url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
- m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
+ m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err))
continue
}
defer body.Close()
@@ -129,13 +129,13 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
m.log.Debug(fmt.Sprintf("Learning new hash from %q", url))
body, err := m.downloadFromUpstream(ctx, url)
if err != nil {
- m.log.Debug(fmt.Sprintf("Failed to download file from %q: %v", url, err))
+ m.log.Debug(fmt.Sprintf("Failed to download file from %q: %q", url, err))
continue
}
defer body.Close()
streamedHash := sha256.New()
if _, err := io.Copy(streamedHash, body); err != nil {
- m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %v", url, err))
+ m.log.Debug(fmt.Sprintf("Failed to stream file from %q: %q", url, err))
}
learnedHash := hex.EncodeToString(streamedHash.Sum(nil))
m.log.Debug(fmt.Sprintf("File successfully downloaded from %q with %q", url, learnedHash))
@@ -146,7 +146,7 @@ func (m *Maintainer) Learn(ctx context.Context, urls []string) (string, error) {
// Check checks if a file is present and has the correct hash in the CAS mirror.
func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
- m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %v", expectedHash))
+ m.log.Debug(fmt.Sprintf("Checking consistency of object with hash %q", expectedHash))
if m.unauthenticated {
return m.checkUnauthenticated(ctx, expectedHash)
}
@@ -157,7 +157,7 @@ func (m *Maintainer) Check(ctx context.Context, expectedHash string) error {
// It uses the authenticated CAS s3 endpoint to download the file metadata.
func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string) error {
key := path.Join(keyBase, expectedHash)
- m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %v, Key: %v}", m.bucket, key))
+ m.log.Debug(fmt.Sprintf("Check: s3 getObjectAttributes {Bucket: %q, Key: %q}", m.bucket, key))
attributes, err := m.objectStorageClient.GetObjectAttributes(ctx, &s3.GetObjectAttributesInput{
Bucket: &m.bucket,
Key: &key,
@@ -174,7 +174,7 @@ func (m *Maintainer) checkAuthenticated(ctx context.Context, expectedHash string
// checksums are not guaranteed to be present
// and if present, they are only meaningful for single part objects
// fallback if checksum cannot be verified from attributes
- m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %v. Falling back to download.", key))
+ m.log.Debug(fmt.Sprintf("S3 object attributes cannot be used to verify key %q. Falling back to download.", key))
return m.checkUnauthenticated(ctx, expectedHash)
}
@@ -192,7 +192,7 @@ func (m *Maintainer) checkUnauthenticated(ctx context.Context, expectedHash stri
if err != nil {
return err
}
- m.log.Debug(fmt.Sprintf("Check: http get {Url: %v}", pubURL))
+ m.log.Debug(fmt.Sprintf("Check: http get {Url: %q}", pubURL))
req, err := http.NewRequestWithContext(ctx, http.MethodGet, pubURL, http.NoBody)
if err != nil {
return err
@@ -221,10 +221,10 @@ func (m *Maintainer) put(ctx context.Context, hash string, data io.Reader) error
key := path.Join(keyBase, hash)
if m.dryRun {
- m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %v, Key: %v}", m.bucket, key))
+ m.log.Debug(fmt.Sprintf("DryRun: s3 put object {Bucket: %q, Key: %q}", m.bucket, key))
return nil
}
- m.log.Debug(fmt.Sprintf("Uploading object with hash %v to s3://%v/%v", hash, m.bucket, key))
+ m.log.Debug(fmt.Sprintf("Uploading object with hash %q to \"s3://%s/%s\"", hash, m.bucket, key))
_, err := m.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &m.bucket,
Key: &key,
diff --git a/hack/bazel-deps-mirror/upgrade.go b/hack/bazel-deps-mirror/upgrade.go
index e2c07d5c2..8729f0aea 100644
--- a/hack/bazel-deps-mirror/upgrade.go
+++ b/hack/bazel-deps-mirror/upgrade.go
@@ -40,7 +40,7 @@ func runUpgrade(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "unauthenticated", flags.unauthenticated, "dryRun", flags.dryRun)
fileHelper, err := bazelfiles.New()
if err != nil {
@@ -96,10 +96,10 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
}
found := rules.Rules(buildfile, rules.SupportedRules)
if len(found) == 0 {
- log.Debug(fmt.Sprintf("No rules found in file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("No rules found in file: %q", bazelFile.RelPath))
return iss, nil
}
- log.Debug(fmt.Sprintf("Found %d rules in file: %s", len(found), bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("Found %d rules in file: %q", len(found), bazelFile.RelPath))
for _, rule := range found {
changedRule, ruleIssues := upgradeRule(ctx, mirrorUpload, rule, log)
if len(ruleIssues) > 0 {
@@ -113,7 +113,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
return iss, nil
}
if !changed {
- log.Debug(fmt.Sprintf("No changes to file: %s", bazelFile.RelPath))
+ log.Debug(fmt.Sprintf("No changes to file: %q", bazelFile.RelPath))
return iss, nil
}
if dryRun {
@@ -133,7 +133,7 @@ func upgradeBazelFile(ctx context.Context, fileHelper *bazelfiles.Helper, mirror
}
func upgradeRule(ctx context.Context, mirrorUpload mirrorUploader, rule *build.Rule, log *slog.Logger) (changed bool, iss []error) {
- log.Debug(fmt.Sprintf("Upgrading rule: %s", rule.Name()))
+ log.Debug(fmt.Sprintf("Upgrading rule: %q", rule.Name()))
upstreamURLs, err := rules.UpstreamURLs(rule)
if errors.Is(err, rules.ErrNoUpstreamURL) {
diff --git a/hack/oci-pin/codegen.go b/hack/oci-pin/codegen.go
index 910056ed0..774b794da 100644
--- a/hack/oci-pin/codegen.go
+++ b/hack/oci-pin/codegen.go
@@ -45,14 +45,14 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "identifier", flags.identifier, "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath, "pkg", flags.pkg)
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting OCI image reference %q: %w", flags.imageRepoTag, err)
}
- log.Debug(fmt.Sprintf("Generating Go code for OCI image %s.", name))
+ log.Debug(fmt.Sprintf("Generating Go code for OCI image %q.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@@ -78,7 +78,7 @@ func runCodegen(cmd *cobra.Command, _ []string) error {
return err
}
- log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
+ log.Debug(fmt.Sprintf("OCI image digest: %q", digest))
if err := inject.Render(out, inject.PinningValues{
Package: flags.pkg,
diff --git a/hack/oci-pin/merge.go b/hack/oci-pin/merge.go
index 94bafd52b..565d08a35 100644
--- a/hack/oci-pin/merge.go
+++ b/hack/oci-pin/merge.go
@@ -36,7 +36,7 @@ func runMerge(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "inputs", flags.inputs, "output", flags.output, "logLevel", flags.logLevel)
log.Debug(fmt.Sprintf("Merging sum file from %q into %q.", flags.inputs, flags.output))
diff --git a/hack/oci-pin/sum.go b/hack/oci-pin/sum.go
index d6be5154a..feacd8ca1 100644
--- a/hack/oci-pin/sum.go
+++ b/hack/oci-pin/sum.go
@@ -42,14 +42,14 @@ func runSum(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "imageRepoTag", flags.imageRepoTag, "ociPath", flags.ociPath)
registry, prefix, name, tag, err := splitRepoTag(flags.imageRepoTag)
if err != nil {
return fmt.Errorf("splitting repo tag: %w", err)
}
- log.Debug(fmt.Sprintf("Generating sum file for OCI image %s.", name))
+ log.Debug(fmt.Sprintf("Generating sum file for OCI image %q.", name))
ociIndexPath := filepath.Join(flags.ociPath, "index.json")
index, err := os.Open(ociIndexPath)
@@ -75,7 +75,7 @@ func runSum(cmd *cobra.Command, _ []string) error {
return fmt.Errorf("extracting OCI image digest: %w", err)
}
- log.Debug(fmt.Sprintf("OCI image digest: %s", digest))
+ log.Debug(fmt.Sprintf("OCI image digest: %q", digest))
refs := []sums.PinnedImageReference{
{
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index a18f50a6d..a18b99dc7 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -46,7 +46,7 @@ require (
golang.org/x/sys v0.17.0 // indirect
golang.org/x/term v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect
- google.golang.org/protobuf v1.32.0 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 288fbb4bd..c0750a1c3 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -813,8 +813,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/image/mirror/SHA256SUMS b/image/mirror/SHA256SUMS
index 017c3465d..ba49e4366 100644
--- a/image/mirror/SHA256SUMS
+++ b/image/mirror/SHA256SUMS
@@ -68,7 +68,7 @@ b0fc6c55f5989aebf6e71279541206070b32b3b28b708a249bd3bdeaa6c088a4 filesystem-3.1
79986f917ef1bae7ca2378b16515ba44c19160f5a5eae4f6b697eda160bc26c1 findutils-4.9.0-3.fc38.x86_64.rpm
d5ae6a7d99826a17d163d9846c2705442b5792a7ccacc5169e4986cdf4b6bae2 fuse-common-3.14.1-1.fc38.x86_64.rpm
56df47937646df892dad25c6b9ae63d111328febfe86eb93096b8b0a11700b60 fuse-libs-2.9.9-16.fc38.x86_64.rpm
-1e9e8b6447c2650a306e8d107dbdcdaa4f81d4175012eea0c87846faecd64c70 fuse-overlayfs-1.12-1.fc38.x86_64.rpm
+088ebe20ac0854c1f216883aa1f6ed8dfc7844807455f4acfef05b7a4b8509db fuse-overlayfs-1.13-1.fc38.x86_64.rpm
55ca555fe815bd360b08500889b652f50fe4c56dfafbed0cc459f2362641f1a0 fuse3-3.14.1-1.fc38.x86_64.rpm
f54340fec047cc359a6a164a1ce88d0d7ffcd8f7d6334b50dc5b3d234e3a19ac fuse3-libs-3.14.1-1.fc38.x86_64.rpm
e607df61803999da46a199d23d4acadb45b290f29b5b644e583c5526d8081178 gawk-5.1.1-5.fc38.x86_64.rpm
@@ -256,7 +256,7 @@ dea697370ede1848c1a54fdccebf792155d98cbdc5de89e85bbc75ec7c94de8f p11-kit-trust-
21c59eeb1ad62c09aadca6a4168f927ff943f82e4f764d589f5acb2ab6efc993 pam-libs-1.5.2-16.fc38.i686.rpm
63e970f7b3f8c54e1dff90661c26519f32a4bf7486c40f2dd38d55e40660230e pam-libs-1.5.2-16.fc38.x86_64.rpm
8d846f866158409c775656b39e372d59cf224936d29972d3b6d14e40d3b832ca parted-3.5-11.fc38.x86_64.rpm
-5ab994e5589d48c9e600ef7a42c53653607d0c6455c55f739e07da7c3a483bf6 passt-0^20231230.gf091893-1.fc38.x86_64.rpm
+7a4cd426505349a948fbc5bcc24545fbdfb7807d525a9c5a41e75dd57b79dccf passt-0^20240220.g1e6f92b-1.fc38.x86_64.rpm
43603df046850c4cf067960d8e47998de5c33955b1f865df8d66f20c1b7f676a passwd-0.80-14.fc38.x86_64.rpm
f2737b94fa026a56c7a427f8f4221ff379ea4c4c32f2fff9d95a7a7836dcc6c7 pcre2-10.42-1.fc38.1.i686.rpm
cb1caf3e9a4ddc8343c0757c7a2730bf5de2b5f0b4c9ee7d928609566f64f010 pcre2-10.42-1.fc38.1.x86_64.rpm
@@ -341,7 +341,7 @@ cce5fcc8b6b0312caeca04a19494358888b00c125747f5c2d2bd8f006665c730 vim-common-9.1
5fa001dbcd0752e75421b2e96aabb73265a48cdd646b02dc947da768147f2be8 vim-data-9.1.113-1.fc38.noarch.rpm
545d77bb579a8fb3e87ecd1d5acf616b4b837612f189206171edad73fd4864ab vim-enhanced-9.1.113-1.fc38.x86_64.rpm
8743bcb074aed6aa20914b7d0258cd6938e3642fe3550279bb1c66c6300d936a vim-filesystem-9.1.113-1.fc38.noarch.rpm
-7f8524d182dacd6bef744c11d225dd63a82100350e95fe3ec414e70cf642c1f1 wget-1.21.3-5.fc38.x86_64.rpm
+a4c8b2a90705fed491f6f7f258904637c18773d323d39e97bf9036260b79a0f6 wget-1.21.4-1.fc38.x86_64.rpm
2c8b143f3cb83efa5a31c85bea1da3164ca2dde5e2d75d25115f3e21ef98b4e0 which-2.21-39.fc38.x86_64.rpm
84f87df3afabe3de8748f172220107e5a5cbb0f0ef954386ecff6b914604aada whois-nls-5.5.18-1.fc38.noarch.rpm
59a7a5a775c196961cdc51fb89440a055295c767a632bfa684760e73650aa9a0 xkeyboard-config-2.38-1.fc38.noarch.rpm
diff --git a/image/system/mkosi.conf b/image/system/mkosi.conf
index f49c9ebd8..b23cf00a3 100644
--- a/image/system/mkosi.conf
+++ b/image/system/mkosi.conf
@@ -13,7 +13,7 @@ Seed=0e9a6fe0-68f6-408c-bbeb-136054d20445
SourceDateEpoch=0
Bootable=yes
Bootloader=uki
-KernelCommandLine=preempt=full rd.shell=0 rd.emergency=reboot loglevel=8 console=ttyS0
+KernelCommandLine=preempt=full rd.shell=0 rd.emergency=reboot loglevel=8
RemoveFiles=/var/log
RemoveFiles=/var/cache
RemoveFiles=/etc/pki/ca-trust/extracted/java/cacerts
diff --git a/image/system/variants.bzl b/image/system/variants.bzl
index 75dd7c21e..3cca05c95 100644
--- a/image/system/variants.bzl
+++ b/image/system/variants.bzl
@@ -55,6 +55,7 @@ base_cmdline = "selinux=1 enforcing=0 audit=0"
csp_settings = {
"aws": {
"kernel_command_line_dict": {
+ "console": "ttyS0",
"constel.csp": "aws",
"idle": "poll",
"mitigations": "auto",
@@ -62,20 +63,21 @@ csp_settings = {
},
"azure": {
"kernel_command_line_dict": {
+ "console": "ttyS0",
"constel.csp": "azure",
"mitigations": "auto,nosmt",
},
},
"gcp": {
"kernel_command_line_dict": {
+ "console": "ttyS0",
"constel.csp": "gcp",
"mitigations": "auto,nosmt",
},
},
"openstack": {
- "kernel_command_line": "console=tty0 console=ttyS0",
+ "kernel_command_line": "console=tty0 console=ttyS0 console=ttyS1",
"kernel_command_line_dict": {
- "console": "tty0",
"constel.csp": "openstack",
"kvm_amd.sev": "1",
"mem_encrypt": "on",
@@ -86,6 +88,7 @@ csp_settings = {
"qemu": {
"autologin": True,
"kernel_command_line_dict": {
+ "console": "ttyS0",
"constel.csp": "qemu",
"mitigations": "auto,nosmt",
},
diff --git a/image/upload/internal/cmd/info.go b/image/upload/internal/cmd/info.go
index cd629600e..b68db4929 100644
--- a/image/upload/internal/cmd/info.go
+++ b/image/upload/internal/cmd/info.go
@@ -50,7 +50,7 @@ func runInfo(cmd *cobra.Command, args []string) error {
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID)
info, err := readInfoArgs(args)
if err != nil {
return err
diff --git a/image/upload/internal/cmd/measurementsenvelope.go b/image/upload/internal/cmd/measurementsenvelope.go
index 878dcaf71..70c16a24e 100644
--- a/image/upload/internal/cmd/measurementsenvelope.go
+++ b/image/upload/internal/cmd/measurementsenvelope.go
@@ -54,7 +54,7 @@ func runEnvelopeMeasurements(cmd *cobra.Command, _ []string) error {
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "version", flags.version.Version(), "csp", flags.csp, "attestationVariant", flags.attestationVariant, "in", flags.in)
f, err := os.Open(flags.in)
if err != nil {
diff --git a/image/upload/internal/cmd/measurementsmerge.go b/image/upload/internal/cmd/measurementsmerge.go
index 53ec2de2c..78b283850 100644
--- a/image/upload/internal/cmd/measurementsmerge.go
+++ b/image/upload/internal/cmd/measurementsmerge.go
@@ -45,7 +45,7 @@ func runMergeMeasurements(cmd *cobra.Command, args []string) error {
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "out", flags.out, "logLevel", flags.logLevel)
mergedMeasurements, err := readMeasurementsArgs(args)
if err != nil {
diff --git a/image/upload/internal/cmd/measurementsupload.go b/image/upload/internal/cmd/measurementsupload.go
index 850883c63..720864c78 100644
--- a/image/upload/internal/cmd/measurementsupload.go
+++ b/image/upload/internal/cmd/measurementsupload.go
@@ -53,7 +53,7 @@ func runMeasurementsUpload(cmd *cobra.Command, _ []string) error {
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "measurementsPath", flags.measurementsPath, "signaturePath", flags.signaturePath, "region", flags.region, "bucket", flags.bucket, "distributionID", flags.distributionID)
uploadC, uploadCClose, err := measurementsuploader.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
diff --git a/image/upload/internal/cmd/uplosi.go b/image/upload/internal/cmd/uplosi.go
index 13a854683..ddfec8d70 100644
--- a/image/upload/internal/cmd/uplosi.go
+++ b/image/upload/internal/cmd/uplosi.go
@@ -59,8 +59,8 @@ func runUplosi(cmd *cobra.Command, _ []string) error {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
-
+ log.Debug("Using flags", "raw-image", flags.rawImage, "attestation-variant", flags.attestationVariant, "csp", flags.provider, "ref", flags.version.Ref(), "stream", flags.version.Stream(),
+ "version", flags.version.Version(), "region", flags.region, "bucket", flags.bucket, "distribution-id", flags.distributionID, "out", flags.out, "uplosi-path", flags.uplosiPath)
archiveC, archiveCClose, err := archive.New(cmd.Context(), flags.region, flags.bucket, flags.distributionID, log)
if err != nil {
return err
diff --git a/internal/api/client/client.go b/internal/api/client/client.go
index d9ad7ec9e..4929872ed 100644
--- a/internal/api/client/client.go
+++ b/internal/api/client/client.go
@@ -131,7 +131,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Bucket: &c.bucket,
Prefix: &path,
}
- c.Logger.Debug(fmt.Sprintf("Listing objects in %s", path))
+ c.Logger.Debug(fmt.Sprintf("Listing objects in %q", path))
objs := []s3types.Object{}
out := &s3.ListObjectsV2Output{IsTruncated: ptr(true)}
for out.IsTruncated != nil && *out.IsTruncated {
@@ -142,7 +142,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
}
objs = append(objs, out.Contents...)
}
- c.Logger.Debug(fmt.Sprintf("Found %d objects in %s", len(objs), path))
+ c.Logger.Debug(fmt.Sprintf("Found %d objects in %q", len(objs), path))
if len(objs) == 0 {
c.Logger.Warn(fmt.Sprintf("Path %s is already empty", path))
@@ -167,7 +167,7 @@ func (c *Client) DeletePath(ctx context.Context, path string) error {
Objects: objIDs,
},
}
- c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %s", len(objs), path))
+ c.Logger.Debug(fmt.Sprintf("Deleting %d objects in %q", len(objs), path))
if _, err := c.s3Client.DeleteObjects(ctx, deleteIn); err != nil {
return fmt.Errorf("deleting objects in %s: %w", path, err)
}
@@ -197,7 +197,7 @@ func Fetch[T APIObject](ctx context.Context, c *Client, obj T) (T, error) {
Key: ptr(obj.JSONPath()),
}
- c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %s", obj, obj.JSONPath()))
+ c.Logger.Debug(fmt.Sprintf("Fetching %T from s3: %q", obj, obj.JSONPath()))
out, err := c.s3Client.GetObject(ctx, in)
var noSuchkey *s3types.NoSuchKey
if errors.As(err, &noSuchkey) {
@@ -243,7 +243,7 @@ func Update(ctx context.Context, c *Client, obj APIObject) error {
c.dirtyPaths = append(c.dirtyPaths, "/"+obj.JSONPath())
- c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %v", obj, obj.JSONPath()))
+ c.Logger.Debug(fmt.Sprintf("Uploading %T to s3: %q", obj, obj.JSONPath()))
if _, err := c.Upload(ctx, in); err != nil {
return fmt.Errorf("uploading %T: %w", obj, err)
}
@@ -306,7 +306,7 @@ func Delete(ctx context.Context, c *Client, obj APIObject) error {
Key: ptr(obj.JSONPath()),
}
- c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %s", obj, obj.JSONPath()))
+ c.Logger.Debug(fmt.Sprintf("Deleting %T from s3: %q", obj, obj.JSONPath()))
if _, err := c.DeleteObject(ctx, in); err != nil {
return fmt.Errorf("deleting s3 object at %s: %w", obj.JSONPath(), err)
}
diff --git a/internal/api/versionsapi/cli/add.go b/internal/api/versionsapi/cli/add.go
index 89c64c2b7..f1a6fc4fd 100644
--- a/internal/api/versionsapi/cli/add.go
+++ b/internal/api/versionsapi/cli/add.go
@@ -53,7 +53,8 @@ func runAdd(cmd *cobra.Command, _ []string) (retErr error) {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "dryRun", flags.dryRun, "kind", flags.kind, "latest", flags.latest, "ref", flags.ref,
+ "release", flags.release, "stream", flags.stream, "version", flags.version)
log.Debug("Validating flags")
if err := flags.validate(log); err != nil {
@@ -117,7 +118,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version
} else if err != nil {
return fmt.Errorf("failed to list minor versions: %w", err)
}
- log.Debug(fmt.Sprintf("%s version list: %v", gran.String(), verList))
+ log.Debug(fmt.Sprintf("%q version list: %v", gran.String(), verList.Versions))
insertGran := gran + 1
insertVersion := ver.WithGranularity(insertGran)
@@ -129,7 +130,7 @@ func ensureVersion(ctx context.Context, client *versionsapi.Client, kind version
log.Info(fmt.Sprintf("Inserting %s version %q into list", insertGran.String(), insertVersion))
verList.Versions = append(verList.Versions, insertVersion)
- log.Debug(fmt.Sprintf("New %s version list: %v", gran.String(), verList))
+ log.Debug(fmt.Sprintf("New %q version list: %v", gran.String(), verList.Versions))
if err := client.UpdateVersionList(ctx, verList); err != nil {
return fmt.Errorf("failed to add %s version: %w", gran.String(), err)
diff --git a/internal/api/versionsapi/cli/latest.go b/internal/api/versionsapi/cli/latest.go
index 797cfc64d..6b3c3983f 100644
--- a/internal/api/versionsapi/cli/latest.go
+++ b/internal/api/versionsapi/cli/latest.go
@@ -39,7 +39,7 @@ func runLatest(cmd *cobra.Command, _ []string) (retErr error) {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "ref", flags.ref, "stream", flags.stream, "json", flags.json)
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
diff --git a/internal/api/versionsapi/cli/list.go b/internal/api/versionsapi/cli/list.go
index 717ba6c77..1aa6d88c6 100644
--- a/internal/api/versionsapi/cli/list.go
+++ b/internal/api/versionsapi/cli/list.go
@@ -44,7 +44,8 @@ func runList(cmd *cobra.Command, _ []string) (retErr error) {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "bucket", flags.bucket, "distributionID", flags.distributionID, "json", flags.json, "minorVersion", flags.minorVersion,
+ "ref", flags.ref, "region", flags.region, "stream", flags.stream)
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
diff --git a/internal/api/versionsapi/cli/rm.go b/internal/api/versionsapi/cli/rm.go
index 51802b5fb..b5e90bb58 100644
--- a/internal/api/versionsapi/cli/rm.go
+++ b/internal/api/versionsapi/cli/rm.go
@@ -75,7 +75,9 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
return err
}
log := logger.NewTextLogger(flags.logLevel)
- log.Debug(fmt.Sprintf("Parsed flags: %+v", flags))
+ log.Debug("Using flags", "all", flags.all, "azLocation", flags.azLocation, "azResourceGroup", flags.azResourceGroup, "azSubscription", flags.azSubscription,
+ "bucket", flags.bucket, "distributionID", flags.distributionID, "dryrun", flags.dryrun, "gcpProject", flags.gcpProject, "ref", flags.ref,
+ "region", flags.region, "stream", flags.stream, "version", flags.version, "versionPath", flags.versionPath)
log.Debug("Validating flags")
if err := flags.validate(); err != nil {
@@ -138,12 +140,12 @@ func runRemove(cmd *cobra.Command, _ []string) (retErr error) {
func deleteSingleVersion(ctx context.Context, clients rmImageClients, ver versionsapi.Version, dryrun bool, log *slog.Logger) error {
var retErr error
- log.Debug(fmt.Sprintf("Deleting images for %s", ver.Version()))
+ log.Debug(fmt.Sprintf("Deleting images for %q", ver.Version()))
if err := deleteImage(ctx, clients, ver, dryrun, log); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting images: %w", err))
}
- log.Debug(fmt.Sprintf("Deleting version %s from versions API", ver.Version()))
+ log.Debug(fmt.Sprintf("Deleting version %q from versions API", ver.Version()))
if err := clients.version.DeleteVersion(ctx, ver); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting version from versions API: %w", err))
}
@@ -159,7 +161,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
minorVersions, err := listMinorVersions(ctx, clients.version, ref, stream)
var notFoundErr *apiclient.NotFoundError
 if errors.As(err, &notFoundErr) {
- log.Debug(fmt.Sprintf("No minor versions found for stream %s", stream))
+ log.Debug(fmt.Sprintf("No minor versions found for stream %q", stream))
continue
} else if err != nil {
return fmt.Errorf("listing minor versions for stream %s: %w", stream, err)
@@ -167,7 +169,7 @@ func deleteRef(ctx context.Context, clients rmImageClients, ref string, dryrun b
patchVersions, err := listPatchVersions(ctx, clients.version, ref, stream, minorVersions)
 if errors.As(err, &notFoundErr) {
- log.Debug(fmt.Sprintf("No patch versions found for stream %s", stream))
+ log.Debug(fmt.Sprintf("No patch versions found for stream %q", stream))
continue
} else if err != nil {
return fmt.Errorf("listing patch versions for stream %s: %w", stream, err)
@@ -406,7 +408,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
return err
}
a.ec2 = ec2.NewFromConfig(cfg)
- log.Debug(fmt.Sprintf("Deleting resources in AWS region %s", region))
+ log.Debug(fmt.Sprintf("Deleting resources in AWS region %q", region))
snapshotID, err := a.getSnapshotID(ctx, ami, log)
if err != nil {
@@ -427,7 +429,7 @@ func (a *awsClient) deleteImage(ctx context.Context, ami string, region string,
}
func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool, log *slog.Logger) error {
- log.Debug(fmt.Sprintf("Deregistering image %s", ami))
+ log.Debug(fmt.Sprintf("Deregistering image %q", ami))
deregisterReq := ec2.DeregisterImageInput{
ImageId: &ami,
@@ -446,7 +448,7 @@ func (a *awsClient) deregisterImage(ctx context.Context, ami string, dryrun bool
}
func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Logger) (string, error) {
- log.Debug(fmt.Sprintf("Describing image %s", ami))
+ log.Debug(fmt.Sprintf("Describing image %q", ami))
req := ec2.DescribeImagesInput{
ImageIds: []string{ami},
@@ -482,7 +484,7 @@ func (a *awsClient) getSnapshotID(ctx context.Context, ami string, log *slog.Log
}
func (a *awsClient) deleteSnapshot(ctx context.Context, snapshotID string, dryrun bool, log *slog.Logger) error {
- log.Debug(fmt.Sprintf("Deleting AWS snapshot %s", snapshotID))
+ log.Debug(fmt.Sprintf("Deleting AWS snapshot %q", snapshotID))
req := ec2.DeleteSnapshotInput{
SnapshotId: &snapshotID,
@@ -536,11 +538,11 @@ func (g *gcpClient) deleteImage(ctx context.Context, imageURI string, dryrun boo
}
if dryrun {
- log.Debug(fmt.Sprintf("DryRun: delete image request: %v", req))
+ log.Debug(fmt.Sprintf("DryRun: delete image request: %q", req.String()))
return nil
}
- log.Debug(fmt.Sprintf("Deleting image %s", image))
+ log.Debug(fmt.Sprintf("Deleting image %q", image))
op, err := g.compute.Delete(ctx, req)
if err != nil && strings.Contains(err.Error(), "404") {
log.Warn(fmt.Sprintf("GCP image %s not found", image))
@@ -631,7 +633,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
}
if dryrun {
- log.Debug(fmt.Sprintf("DryRun: delete image %v", azImage))
+ log.Debug(fmt.Sprintf("DryRun: delete image: gallery: %q, image definition: %q, resource group: %q, version: %q", azImage.gallery, azImage.imageDefinition, azImage.resourceGroup, azImage.version))
return nil
}
@@ -663,7 +665,7 @@ func (a *azureClient) deleteImage(ctx context.Context, image string, dryrun bool
time.Sleep(15 * time.Second) // Azure needs time understand that there is no version left...
- log.Debug(fmt.Sprintf("Deleting image definition %s", azImage.imageDefinition))
+ log.Debug(fmt.Sprintf("Deleting image definition %q", azImage.imageDefinition))
op, err := a.image.BeginDelete(ctx, azImage.resourceGroup, azImage.gallery, azImage.imageDefinition, nil)
if err != nil {
return fmt.Errorf("deleting image definition %s: %w", azImage.imageDefinition, err)
@@ -687,7 +689,7 @@ type azImage struct {
func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Logger) (azImage, error) {
if m := azImageRegex.FindStringSubmatch(image); len(m) == 5 {
log.Debug(fmt.Sprintf(
- "Image matches local image format, resource group: %s, gallery: %s, image definition: %s, version: %s",
+ "Image matches local image format, resource group: %q, gallery: %q, image definition: %q, version: %q",
m[1], m[2], m[3], m[4],
))
return azImage{
@@ -708,7 +710,7 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo
version := m[3]
log.Debug(fmt.Sprintf(
- "Image matches community image format, gallery public name: %s, image definition: %s, version: %s",
+ "Image matches community image format, gallery public name: %q, image definition: %q, version: %q",
galleryPublicName, imageDefinition, version,
))
@@ -725,15 +727,15 @@ func (a *azureClient) parseImage(ctx context.Context, image string, log *slog.Lo
continue
}
if v.Properties.SharingProfile == nil {
- log.Debug(fmt.Sprintf("Skipping gallery %s with nil sharing profile", *v.Name))
+ log.Debug(fmt.Sprintf("Skipping gallery %q with nil sharing profile", *v.Name))
continue
}
if v.Properties.SharingProfile.CommunityGalleryInfo == nil {
- log.Debug(fmt.Sprintf("Skipping gallery %s with nil community gallery info", *v.Name))
+ log.Debug(fmt.Sprintf("Skipping gallery %q with nil community gallery info", *v.Name))
continue
}
if v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames == nil {
- log.Debug(fmt.Sprintf("Skipping gallery %s with nil public names", *v.Name))
+ log.Debug(fmt.Sprintf("Skipping gallery %q with nil public names", *v.Name))
continue
}
for _, publicName := range v.Properties.SharingProfile.CommunityGalleryInfo.PublicNames {
diff --git a/internal/api/versionsapi/client.go b/internal/api/versionsapi/client.go
index c03e8a7b6..5d14fdacd 100644
--- a/internal/api/versionsapi/client.go
+++ b/internal/api/versionsapi/client.go
@@ -131,18 +131,18 @@ func (c *Client) DeleteRef(ctx context.Context, ref string) error {
func (c *Client) DeleteVersion(ctx context.Context, ver Version) error {
var retErr error
- c.Client.Logger.Debug(fmt.Sprintf("Deleting version %s from minor version list", ver.version))
+ c.Client.Logger.Debug(fmt.Sprintf("Deleting version %q from minor version list", ver.version))
possibleNewLatest, err := c.deleteVersionFromMinorVersionList(ctx, ver)
if err != nil {
retErr = errors.Join(retErr, fmt.Errorf("removing from minor version list: %w", err))
}
- c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %s", ver.version))
+ c.Client.Logger.Debug(fmt.Sprintf("Checking latest version for %q", ver.version))
if err := c.deleteVersionFromLatest(ctx, ver, possibleNewLatest); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("updating latest version: %w", err))
}
- c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %s for %s", ver.ArtifactPath(APIV1), ver.version))
+ c.Client.Logger.Debug(fmt.Sprintf("Deleting artifact path %q for %q", ver.ArtifactPath(APIV1), ver.version))
if err := c.Client.DeletePath(ctx, ver.ArtifactPath(APIV1)); err != nil {
retErr = errors.Join(retErr, fmt.Errorf("deleting artifact path: %w", err))
}
@@ -159,7 +159,7 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers
Base: ver.WithGranularity(GranularityMinor),
Kind: VersionKindImage,
}
- c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %s", ver.version))
+ c.Client.Logger.Debug(fmt.Sprintf("Fetching minor version list for version %q", ver.version))
minorList, err := c.FetchVersionList(ctx, minorList)
var notFoundErr *apiclient.NotFoundError
 if errors.As(err, &notFoundErr) {
@@ -196,16 +196,16 @@ func (c *Client) deleteVersionFromMinorVersionList(ctx context.Context, ver Vers
}
if c.Client.DryRun {
- c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %s to %v", minorList.JSONPath(), minorList))
+ c.Client.Logger.Debug(fmt.Sprintf("DryRun: Updating minor version list %q to %v", minorList.JSONPath(), minorList))
return latest, nil
}
- c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %s", minorList.JSONPath()))
+ c.Client.Logger.Debug(fmt.Sprintf("Updating minor version list %q", minorList.JSONPath()))
if err := c.UpdateVersionList(ctx, minorList); err != nil {
return latest, fmt.Errorf("updating minor version list %s: %w", minorList.JSONPath(), err)
}
- c.Client.Logger.Debug(fmt.Sprintf("Removed version %s from minor version list %s", ver.version, minorList.JSONPath()))
+ c.Client.Logger.Debug(fmt.Sprintf("Removed version %q from minor version list %q", ver.version, minorList.JSONPath()))
return latest, nil
}
@@ -216,7 +216,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi
Stream: ver.stream,
Kind: VersionKindImage,
}
- c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %s", latest.JSONPath()))
+ c.Client.Logger.Debug(fmt.Sprintf("Fetching latest version from %q", latest.JSONPath()))
latest, err := c.FetchVersionLatest(ctx, latest)
var notFoundErr *apiclient.NotFoundError
 if errors.As(err, &notFoundErr) {
@@ -227,7 +227,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi
}
if latest.Version != ver.version {
- c.Client.Logger.Debug(fmt.Sprintf("Latest version is %s, not the deleted version %s", latest.Version, ver.version))
+ c.Client.Logger.Debug(fmt.Sprintf("Latest version is %q, not the deleted version %q", latest.Version, ver.version))
return nil
}
@@ -238,7 +238,7 @@ func (c *Client) deleteVersionFromLatest(ctx context.Context, ver Version, possi
}
if c.Client.DryRun {
- c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %s to %s", latest.Version, possibleNewLatest.Version))
+ c.Client.Logger.Debug(fmt.Sprintf("Would update latest version from %q to %q", latest.Version, possibleNewLatest.Version))
return nil
}
diff --git a/internal/attestation/measurements/measurements.go b/internal/attestation/measurements/measurements.go
index 62172dd26..a702706bd 100644
--- a/internal/attestation/measurements/measurements.go
+++ b/internal/attestation/measurements/measurements.go
@@ -26,6 +26,7 @@ import (
"net/url"
"sort"
"strconv"
+ "strings"
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -330,6 +331,15 @@ func (m *M) UnmarshalYAML(unmarshal func(any) error) error {
return nil
}
+// String returns a string representation of the measurements.
+func (m M) String() string {
+ var returnString string
+ for i, measurement := range m {
+ returnString = strings.Join([]string{returnString, fmt.Sprintf("%d: 0x%s", i, hex.EncodeToString(measurement.Expected))}, ",")
+ }
+ return returnString
+}
+
func (m *M) fromImageMeasurementsV2(
measurements ImageMeasurementsV2, wantVersion versionsapi.Version,
csp cloudprovider.Provider, attestationVariant variant.Variant,
diff --git a/internal/attestation/measurements/measurements_enterprise.go b/internal/attestation/measurements/measurements_enterprise.go
index 8e676cd04..06f79ab19 100644
--- a/internal/attestation/measurements/measurements_enterprise.go
+++ b/internal/attestation/measurements/measurements_enterprise.go
@@ -16,13 +16,13 @@ package measurements
// revive:disable:var-naming
var (
- aws_AWSNitroTPM = M{0: {Expected: []byte{0x73, 0x7f, 0x76, 0x7a, 0x12, 0xf5, 0x4e, 0x70, 0xee, 0xcb, 0xc8, 0x68, 0x40, 0x11, 0x32, 0x3a, 0xe2, 0xfe, 0x2d, 0xd9, 0xf9, 0x07, 0x85, 0x57, 0x79, 0x69, 0xd7, 0xa2, 0x01, 0x3e, 0x8c, 0x12}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x82, 0x51, 0xcf, 0x40, 0x53, 0x18, 0x0d, 0x53, 0x64, 0x75, 0x03, 0x45, 0xa5, 0xa8, 0x16, 0xe2, 0x4b, 0x23, 0x40, 0x83, 0x43, 0x83, 0xbd, 0x43, 0x49, 0x8b, 0x8f, 0xb7, 0xc9, 0x4d, 0x57, 0xdd}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x5d, 0x1b, 0xbe, 0xd7, 0xec, 0x13, 0xc8, 0xd5, 0xea, 0x93, 0x71, 0xce, 0x79, 0x02, 0x13, 0xbe, 0x82, 0x5b, 0xed, 0x55, 0x26, 0x77, 0x0a, 0x87, 0x7c, 0x45, 0xdf, 0x76, 0xdd, 0x1b, 0x2a, 0x12}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x8a, 0x2b, 0xe4, 0xe4, 0x71, 0x23, 0xbd, 0xc8, 0x99, 0x0d, 0xbe, 0x36, 0xad, 0x77, 0xee, 0x04, 0xc5, 0x4e, 0x7d, 0x68, 0xc6, 0x1d, 0x86, 0xde, 0x9d, 0x0a, 0xf3, 0x48, 0xe6, 0xdf, 0x60, 0x15}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
- aws_AWSSEVSNP = M{0: {Expected: []byte{0x7b, 0x06, 0x8c, 0x0c, 0x3a, 0xc2, 0x9a, 0xfe, 0x26, 0x41, 0x34, 0x53, 0x6b, 0x9b, 0xe2, 0x6f, 0x1d, 0x4c, 0xcd, 0x57, 0x5b, 0x88, 0xd3, 0xc3, 0xce, 0xab, 0xf3, 0x6a, 0xc9, 0x9c, 0x02, 0x78}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x74, 0x17, 0x6b, 0x8a, 0x77, 0xd9, 0x49, 0x1a, 0xd3, 0xe5, 0x62, 0xd2, 0xea, 0xba, 0x5f, 0x85, 0xd7, 0x3d, 0x26, 0x4e, 0x30, 0xaf, 0x78, 0x7e, 0x58, 0x33, 0x47, 0x13, 0x8e, 0x56, 0x23, 0x8a}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x3e, 0xc6, 0xf2, 0xe4, 0xf0, 0x59, 0xee, 0xf7, 0x29, 0xe4, 0xde, 0x5f, 0x70, 0x67, 0x15, 0x8d, 0x1f, 0x10, 0x1d, 0x7d, 0xd1, 0x40, 0x3c, 0x9f, 0x87, 0x48, 0xfb, 0xf5, 0x77, 0xa6, 0x03, 0xbb}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x84, 0xae, 0x81, 0x49, 0x2f, 0x9c, 0x6e, 0x2c, 0x8c, 0x49, 0x39, 0x27, 0x79, 0x7e, 0x85, 0x8d, 0xbc, 0xf1, 0x2f, 0x80, 0xde, 0x43, 0xd0, 0xbb, 0x47, 0xe7, 0xf0, 0x50, 0x3e, 0xa2, 0xa8, 0x6a}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
- azure_AzureSEVSNP = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x4b, 0xdf, 0xb0, 0xb8, 0x91, 0xde, 0x2a, 0xaa, 0xc7, 0x3f, 0x1a, 0xb6, 0x9c, 0xde, 0xb2, 0xed, 0x66, 0x62, 0x29, 0xf5, 0x83, 0x40, 0x9e, 0x95, 0x00, 0xca, 0x21, 0xdb, 0xb1, 0xf7, 0x06, 0x7d}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x3c, 0xb3, 0x7a, 0xfe, 0xeb, 0x1c, 0xb7, 0xf8, 0x98, 0xb5, 0x0d, 0x3b, 0x9d, 0x17, 0xa7, 0x3b, 0x83, 0x61, 0x2d, 0xef, 0xaa, 0x3a, 0xeb, 0x1c, 0xcd, 0x61, 0x99, 0xbf, 0x48, 0x9e, 0x28, 0xd2}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x2c, 0xbe, 0x09, 0x13, 0xac, 0xfb, 0x6f, 0xcf, 0x6d, 0xa9, 0x55, 0xbc, 0x77, 0xf4, 0xed, 0x27, 0x0b, 0xfc, 0x1c, 0xe4, 0xf8, 0x02, 0xd4, 0xc9, 0xf5, 0x37, 0xde, 0x1f, 0x85, 0xa2, 0x66, 0x25}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
- azure_AzureTDX = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x2f, 0xda, 0xe7, 0x93, 0xbe, 0x8d, 0x3d, 0x81, 0xaf, 0x62, 0x5d, 0x37, 0x95, 0x26, 0x37, 0x02, 0x36, 0xe2, 0x22, 0x99, 0x49, 0x36, 0x7a, 0x09, 0x90, 0x1b, 0x4c, 0x6d, 0xf1, 0xfa, 0x43, 0x7e}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x76, 0xcd, 0x4f, 0xe2, 0x88, 0xa6, 0xa5, 0xc0, 0x0f, 0x63, 0x0d, 0x75, 0x0c, 0xe8, 0x24, 0xc0, 0x96, 0x2b, 0xbf, 0xbd, 0x78, 0xda, 0xd9, 0x1b, 0x18, 0x20, 0x4b, 0x2e, 0xf9, 0x00, 0x49, 0xbf}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xf0, 0x93, 0x1f, 0xfc, 0x64, 0x30, 0xff, 0x44, 0xbd, 0xca, 0xe0, 0x50, 0xa9, 0xe2, 0xd6, 0x2a, 0xd2, 0xae, 0x64, 0x08, 0xc1, 0x0e, 0x33, 0x4f, 0x9b, 0xc6, 0x2b, 0xb6, 0xae, 0x88, 0x60, 0x8c}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ aws_AWSNitroTPM = M{0: {Expected: []byte{0x73, 0x7f, 0x76, 0x7a, 0x12, 0xf5, 0x4e, 0x70, 0xee, 0xcb, 0xc8, 0x68, 0x40, 0x11, 0x32, 0x3a, 0xe2, 0xfe, 0x2d, 0xd9, 0xf9, 0x07, 0x85, 0x57, 0x79, 0x69, 0xd7, 0xa2, 0x01, 0x3e, 0x8c, 0x12}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x4b, 0x24, 0x18, 0x9e, 0x05, 0xfe, 0x66, 0xbc, 0x9d, 0x14, 0xd3, 0xa3, 0x48, 0x8a, 0x71, 0x97, 0x9b, 0x80, 0x8e, 0x93, 0xbe, 0x9d, 0xf2, 0x5e, 0x92, 0x9f, 0x5d, 0x99, 0x23, 0x90, 0xf1, 0xe9}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xc9, 0x15, 0xb2, 0x9b, 0x8f, 0xc5, 0x0c, 0xdd, 0xfb, 0x3a, 0xe1, 0x4c, 0xa8, 0xd9, 0x49, 0x92, 0xa5, 0xb8, 0x5a, 0x1e, 0x45, 0x1f, 0x2f, 0x6e, 0x1d, 0x31, 0x84, 0xfe, 0x6e, 0xa6, 0x25, 0x62}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x47, 0x73, 0x78, 0x57, 0x56, 0xf5, 0xe7, 0xb8, 0x1c, 0x2c, 0x59, 0x5a, 0x5e, 0x13, 0x6d, 0xdb, 0x59, 0x48, 0x99, 0x69, 0x2d, 0x3d, 0x1d, 0x18, 0xd9, 0x3c, 0xdc, 0xbf, 0x7f, 0x1f, 0x4b, 0x63}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ aws_AWSSEVSNP = M{0: {Expected: []byte{0x7b, 0x06, 0x8c, 0x0c, 0x3a, 0xc2, 0x9a, 0xfe, 0x26, 0x41, 0x34, 0x53, 0x6b, 0x9b, 0xe2, 0x6f, 0x1d, 0x4c, 0xcd, 0x57, 0x5b, 0x88, 0xd3, 0xc3, 0xce, 0xab, 0xf3, 0x6a, 0xc9, 0x9c, 0x02, 0x78}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xdd, 0xf2, 0x5d, 0xc4, 0x2f, 0xce, 0xf5, 0x05, 0xc6, 0xec, 0xa6, 0x06, 0x84, 0x7b, 0x18, 0xee, 0x9e, 0x39, 0x9c, 0xb7, 0x3a, 0x66, 0x1d, 0x35, 0x8d, 0x15, 0x3a, 0xdb, 0x68, 0x73, 0x5d, 0x2e}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x78, 0x18, 0x1d, 0x22, 0x96, 0x61, 0x9f, 0xa5, 0x7b, 0xbb, 0xb9, 0x55, 0xa1, 0x92, 0xc5, 0x5c, 0x5c, 0xf9, 0xad, 0x3e, 0x40, 0xd5, 0xe0, 0x48, 0xd6, 0xeb, 0x43, 0x66, 0x63, 0xfa, 0xb1, 0xc9}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xf0, 0x0a, 0xd3, 0x61, 0x64, 0x6b, 0x4a, 0xfb, 0x80, 0x2a, 0xa2, 0xda, 0xb4, 0xcf, 0x8f, 0x95, 0x8e, 0xb6, 0x53, 0xf5, 0xd8, 0xf4, 0x8b, 0xff, 0x17, 0xc9, 0x83, 0x16, 0x12, 0x7f, 0xbc, 0xdb}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ azure_AzureSEVSNP = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0xd4, 0x12, 0xf0, 0xc1, 0x94, 0x81, 0x88, 0x9d, 0xbc, 0xd8, 0x76, 0xad, 0x54, 0x86, 0xd4, 0x1f, 0x4c, 0x0f, 0x74, 0x07, 0x22, 0xe3, 0x1e, 0xfd, 0x75, 0x28, 0x93, 0xe8, 0xc1, 0x00, 0x60, 0x4a}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xb0, 0xd7, 0x14, 0x20, 0x95, 0xfb, 0xff, 0x72, 0xf9, 0x2e, 0x42, 0x84, 0xf2, 0x59, 0x79, 0xf9, 0x43, 0x59, 0x45, 0x61, 0xc6, 0x40, 0xfa, 0xdd, 0x1d, 0xe9, 0x6f, 0x14, 0x8a, 0x46, 0x89, 0x9a}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xdf, 0x2f, 0x4f, 0x3d, 0xe0, 0xfe, 0x98, 0x9e, 0x45, 0x68, 0x51, 0xe5, 0x3c, 0xc9, 0x42, 0x20, 0x58, 0x39, 0xa7, 0x98, 0x51, 0x46, 0x61, 0x66, 0x64, 0x80, 0x92, 0xd0, 0x2a, 0x7c, 0x9d, 0x58}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ azure_AzureTDX = M{1: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x31, 0xcf, 0x3a, 0x66, 0xbf, 0xa4, 0x2e, 0xa8, 0x10, 0x64, 0x18, 0xbd, 0x5b, 0x58, 0xd2, 0x31, 0x1f, 0xb2, 0xc2, 0xe7, 0xd0, 0x78, 0x9c, 0x5a, 0x0f, 0x3a, 0xfc, 0xdd, 0xea, 0xd5, 0x0b, 0xd2}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x71, 0x53, 0xf6, 0x6e, 0x3a, 0x75, 0x59, 0x85, 0x56, 0xf0, 0x3c, 0xad, 0xa7, 0x27, 0xb7, 0x76, 0x30, 0xb0, 0xe9, 0xe1, 0x09, 0xc0, 0x80, 0x18, 0xfd, 0x31, 0xda, 0x15, 0xa7, 0xdc, 0xd2, 0x46}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x43, 0x46, 0xeb, 0xd1, 0x79, 0xf3, 0xc4, 0x95, 0x53, 0x12, 0x5b, 0xe3, 0x94, 0xa0, 0x99, 0x47, 0xc6, 0x44, 0xb0, 0x0a, 0xc3, 0xe0, 0x72, 0xea, 0xdd, 0xf7, 0xda, 0x98, 0xb0, 0x44, 0x98, 0xe0}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
azure_AzureTrustedLaunch M
- gcp_GCPSEVES = M{1: {Expected: []byte{0x36, 0x95, 0xdc, 0xc5, 0x5e, 0x3a, 0xa3, 0x40, 0x27, 0xc2, 0x77, 0x93, 0xc8, 0x5c, 0x72, 0x3c, 0x69, 0x7d, 0x70, 0x8c, 0x42, 0xd1, 0xf7, 0x3b, 0xd6, 0xfa, 0x4f, 0x26, 0x60, 0x8a, 0x5b, 0x24}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x4b, 0x86, 0x61, 0xc4, 0xa5, 0xcf, 0xf5, 0xab, 0x94, 0x66, 0x89, 0xf9, 0x03, 0xac, 0x96, 0xda, 0x3f, 0x39, 0xbb, 0xf6, 0xa0, 0xf6, 0x9a, 0x6d, 0x56, 0x01, 0xd4, 0x21, 0xbd, 0xb2, 0x03, 0x61}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xfd, 0xce, 0x5f, 0xe7, 0xe9, 0xfa, 0x32, 0xb6, 0x38, 0xc9, 0x96, 0x6a, 0x7b, 0x33, 0xbb, 0x39, 0x83, 0xa3, 0x78, 0x69, 0x2a, 0xa7, 0x4e, 0x91, 0xfd, 0x8c, 0xc7, 0x96, 0xa2, 0x46, 0xc5, 0x33}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x0d, 0xbc, 0xbe, 0x7f, 0x07, 0x46, 0xa1, 0x83, 0x4f, 0xfa, 0x4d, 0x88, 0xdb, 0xee, 0xa1, 0xb8, 0x0c, 0x9a, 0x6b, 0xac, 0x1f, 0x06, 0x88, 0x41, 0xb9, 0x69, 0x0a, 0xdb, 0xfe, 0xab, 0x09, 0x28}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
- openstack_QEMUVTPM = M{4: {Expected: []byte{0x85, 0x68, 0x3d, 0xec, 0xd8, 0x84, 0x84, 0xeb, 0x89, 0x71, 0x53, 0x1e, 0x33, 0x84, 0x27, 0x40, 0x70, 0x26, 0xce, 0x88, 0xd1, 0x6e, 0x75, 0x24, 0xcd, 0xb3, 0xbc, 0x7a, 0x7e, 0x53, 0x45, 0x7c}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xa2, 0x83, 0x80, 0x53, 0x68, 0xff, 0x7f, 0xc3, 0xe1, 0x9d, 0xdf, 0x49, 0x6d, 0x7a, 0x8c, 0x42, 0x53, 0x02, 0xc4, 0x5d, 0x2e, 0xd4, 0x2d, 0x3e, 0x85, 0xc8, 0x67, 0xf6, 0x6e, 0x88, 0x29, 0x1c}, ValidationOpt: Enforce}, 11: {Expected: []byte{0x52, 0x9b, 0x1e, 0x7a, 0x81, 0xb5, 0xca, 0x2f, 0x12, 0x56, 0x7d, 0x73, 0xe5, 0x0f, 0xf0, 0x83, 0x77, 0x1f, 0x2e, 0x54, 0x1d, 0x19, 0xd7, 0x99, 0xfd, 0xb3, 0xc6, 0x87, 0xf6, 0x33, 0x2f, 0x7c}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ gcp_GCPSEVES = M{1: {Expected: []byte{0x36, 0x95, 0xdc, 0xc5, 0x5e, 0x3a, 0xa3, 0x40, 0x27, 0xc2, 0x77, 0x93, 0xc8, 0x5c, 0x72, 0x3c, 0x69, 0x7d, 0x70, 0x8c, 0x42, 0xd1, 0xf7, 0x3b, 0xd6, 0xfa, 0x4f, 0x26, 0x60, 0x8a, 0x5b, 0x24}, ValidationOpt: WarnOnly}, 2: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 3: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 4: {Expected: []byte{0x71, 0xf1, 0xec, 0x24, 0x63, 0xae, 0x63, 0x33, 0x36, 0x87, 0x87, 0xd1, 0x4d, 0x47, 0x4c, 0x9f, 0x14, 0x24, 0xe0, 0x4d, 0xa7, 0x9d, 0x80, 0xff, 0x8c, 0x62, 0xc6, 0x81, 0xab, 0xce, 0x13, 0x93}, ValidationOpt: Enforce}, 6: {Expected: []byte{0x3d, 0x45, 0x8c, 0xfe, 0x55, 0xcc, 0x03, 0xea, 0x1f, 0x44, 0x3f, 0x15, 0x62, 0xbe, 0xec, 0x8d, 0xf5, 0x1c, 0x75, 0xe1, 0x4a, 0x9f, 0xcf, 0x9a, 0x72, 0x34, 0xa1, 0x3f, 0x19, 0x8e, 0x79, 0x69}, ValidationOpt: WarnOnly}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x83, 0x21, 0xcd, 0xc4, 0x6c, 0x73, 0x64, 0xf8, 0x49, 0x00, 0xb9, 0xce, 0xbc, 0xb4, 0x47, 0xe5, 0xa4, 0x6a, 0xf9, 0xec, 0x93, 0x10, 0x1e, 0xd6, 0xd9, 0xdb, 0x63, 0xc4, 0x14, 0x03, 0x72, 0x05}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xb6, 0xc5, 0xec, 0x69, 0x46, 0xdf, 0x1e, 0xf1, 0x59, 0xbe, 0x1e, 0xc0, 0x20, 0xd0, 0x8f, 0x8f, 0x6f, 0xc9, 0x5f, 0x95, 0x9d, 0xb1, 0x03, 0xcd, 0xe3, 0xa0, 0x57, 0x05, 0x4c, 0xe7, 0x70, 0x80}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ openstack_QEMUVTPM = M{4: {Expected: []byte{0x2f, 0x3f, 0xf4, 0xe5, 0x54, 0x89, 0x3b, 0xd3, 0x52, 0x34, 0x26, 0x29, 0x40, 0xdc, 0x16, 0x08, 0x12, 0x2f, 0x9e, 0xcd, 0x03, 0xc1, 0x93, 0x0e, 0x23, 0xf9, 0x7a, 0xe0, 0xc6, 0x33, 0x51, 0xda}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0x16, 0x49, 0x86, 0x6d, 0xe5, 0xf2, 0x0b, 0x56, 0xf3, 0xbd, 0x25, 0xb0, 0x0b, 0x6c, 0x17, 0x98, 0x1a, 0x1e, 0x2c, 0x51, 0x35, 0xa9, 0xa5, 0xdd, 0x6c, 0x9b, 0xa4, 0xb2, 0x5f, 0xf8, 0x2d, 0x71}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xb3, 0xe2, 0xba, 0x2d, 0x45, 0x12, 0xad, 0x4f, 0x71, 0x59, 0xf2, 0x1b, 0xd1, 0x85, 0x91, 0x64, 0x43, 0x19, 0x06, 0x3e, 0x29, 0x16, 0xcb, 0x25, 0x4e, 0xce, 0x0c, 0xbe, 0xe7, 0xfe, 0x13, 0xda}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 14: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: WarnOnly}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
qemu_QEMUTDX M
- qemu_QEMUVTPM = M{4: {Expected: []byte{0xa5, 0xb1, 0x6b, 0x64, 0x66, 0x8c, 0x31, 0x59, 0xc6, 0xbd, 0x69, 0x9b, 0x4d, 0x26, 0x77, 0x0e, 0xf0, 0xbf, 0xe9, 0xdf, 0x32, 0x2d, 0xa6, 0x8c, 0x11, 0x1d, 0x9c, 0x9e, 0x89, 0x0e, 0x7b, 0x93}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xfc, 0x4b, 0xe9, 0x6f, 0xd0, 0x3e, 0x90, 0x6e, 0xdc, 0x50, 0xbc, 0x6c, 0xdd, 0x0d, 0x6d, 0xe2, 0x9f, 0x7b, 0xcb, 0xbc, 0x8a, 0xd2, 0x42, 0x3a, 0x0a, 0x04, 0xcd, 0x3b, 0xb6, 0xf2, 0x3d, 0x49}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xd8, 0xa6, 0x91, 0x12, 0x82, 0x4b, 0x98, 0xd3, 0x85, 0x7f, 0xa0, 0x85, 0x49, 0x4a, 0x76, 0x86, 0xa0, 0xfc, 0xa8, 0x07, 0x14, 0x88, 0xc1, 0x39, 0x3f, 0x20, 0x34, 0x48, 0x42, 0x12, 0xf0, 0x84}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
+ qemu_QEMUVTPM = M{4: {Expected: []byte{0xdb, 0x1b, 0x60, 0xdb, 0x08, 0x71, 0xca, 0x1e, 0x38, 0x23, 0x72, 0x55, 0x23, 0xb2, 0x36, 0x11, 0x20, 0xd0, 0x38, 0x32, 0x73, 0x75, 0x49, 0xcb, 0x30, 0x8b, 0x2e, 0xa4, 0xe4, 0xcb, 0xce, 0x57}, ValidationOpt: Enforce}, 8: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 9: {Expected: []byte{0xa4, 0x93, 0x81, 0xfa, 0x15, 0x8f, 0xbe, 0xa2, 0x20, 0x0c, 0xec, 0xb1, 0x50, 0xee, 0x79, 0x8f, 0xba, 0x74, 0x28, 0x53, 0x6f, 0x44, 0x7e, 0xe8, 0xe9, 0xfc, 0x56, 0xa0, 0x9c, 0x72, 0x3b, 0xb9}, ValidationOpt: Enforce}, 11: {Expected: []byte{0xdc, 0x9d, 0x30, 0xe0, 0xe3, 0x89, 0x43, 0x5c, 0x66, 0x4f, 0x37, 0xdb, 0x4d, 0x2e, 0xf7, 0xb5, 0xfa, 0x0d, 0x02, 0x2b, 0xf2, 0xea, 0x4c, 0x60, 0x08, 0x47, 0xea, 0x72, 0x43, 0x6d, 0xef, 0x3f}, ValidationOpt: Enforce}, 12: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 13: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}, 15: {Expected: []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, ValidationOpt: Enforce}}
)
diff --git a/internal/attestation/snp/BUILD.bazel b/internal/attestation/snp/BUILD.bazel
index 700a3aa86..f62518f25 100644
--- a/internal/attestation/snp/BUILD.bazel
+++ b/internal/attestation/snp/BUILD.bazel
@@ -8,11 +8,11 @@ go_library(
visibility = ["//:__subpackages__"],
deps = [
"//internal/attestation",
- "//internal/constants",
"@com_github_google_go_sev_guest//abi",
"@com_github_google_go_sev_guest//kds",
"@com_github_google_go_sev_guest//proto/sevsnp",
"@com_github_google_go_sev_guest//verify/trust",
+ "@com_github_google_go_tpm_tools//proto/attest",
],
)
diff --git a/internal/attestation/snp/snp.go b/internal/attestation/snp/snp.go
index 95cba55bf..685af7792 100644
--- a/internal/attestation/snp/snp.go
+++ b/internal/attestation/snp/snp.go
@@ -12,16 +12,19 @@ import (
"bytes"
"crypto/x509"
"encoding/pem"
+ "errors"
"fmt"
"github.com/edgelesssys/constellation/v2/internal/attestation"
- "github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/google/go-sev-guest/abi"
"github.com/google/go-sev-guest/kds"
spb "github.com/google/go-sev-guest/proto/sevsnp"
"github.com/google/go-sev-guest/verify/trust"
+ "github.com/google/go-tpm-tools/proto/attest"
)
+var errNoPemBlocks = errors.New("no PEM blocks found")
+
// Product returns the SEV product info currently supported by Constellation's SNP attestation.
func Product() *spb.SevProduct {
// sevProduct is the product info of the SEV platform as reported through CPUID[EAX=1].
@@ -39,6 +42,7 @@ type InstanceInfo struct {
// AttestationReport is the attestation report from the vTPM (NVRAM) of the CVM.
AttestationReport []byte
Azure *AzureInstanceInfo
+ GCP *attest.GCEInstanceInfo
}
// AzureInstanceInfo contains Azure specific information related to SNP attestation.
@@ -95,7 +99,7 @@ func (a *InstanceInfo) addReportSigner(att *spb.Attestation, report *spb.Report,
// AttestationWithCerts returns a formatted version of the attestation report and its certificates from the instanceInfo.
// Certificates are retrieved in the following precedence:
-// 1. ASK or ARK from issuer. On Azure: THIM. One AWS: not prefilled.
+// 1. ASK from issuer. On Azure: THIM. On AWS: not prefilled (go to option 2). On GCP: prefilled.
// 2. ASK or ARK from fallbackCerts.
// 3. ASK or ARK from AMD KDS.
func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
@@ -120,30 +124,28 @@ func (a *InstanceInfo) AttestationWithCerts(getter trust.HTTPSGetter,
return nil, fmt.Errorf("adding report signer: %w", err)
}
- // If the certificate chain from THIM is present, parse it and format it.
- ask, ark, err := a.ParseCertChain()
- if err != nil {
+ // If a certificate chain was pre-fetched by the Issuer, parse it and format it.
+ // Make sure to only use the ask, since using an ark from the Issuer would invalidate security guarantees.
+ ask, _, err := a.ParseCertChain()
+ if err != nil && !errors.Is(err, errNoPemBlocks) {
logger.Warn(fmt.Sprintf("Error parsing certificate chain: %v", err))
}
if ask != nil {
- logger.Info("Using ASK certificate from Azure THIM")
+ logger.Info("Using ASK certificate from pre-fetched certificate chain")
att.CertificateChain.AskCert = ask.Raw
}
- if ark != nil {
- logger.Info("Using ARK certificate from Azure THIM")
- att.CertificateChain.ArkCert = ark.Raw
- }
// If a cached ASK or an ARK from the Constellation config is present, use it.
if att.CertificateChain.AskCert == nil && fallbackCerts.ask != nil {
logger.Info("Using cached ASK certificate")
att.CertificateChain.AskCert = fallbackCerts.ask.Raw
}
- if att.CertificateChain.ArkCert == nil && fallbackCerts.ark != nil {
- logger.Info(fmt.Sprintf("Using ARK certificate from %s", constants.ConfigFilename))
+ if fallbackCerts.ark != nil {
+ logger.Info("Using cached ARK certificate")
att.CertificateChain.ArkCert = fallbackCerts.ark.Raw
}
- // Otherwise, retrieve it from AMD KDS.
+
+ // Otherwise, retrieve missing certificates from AMD KDS.
if att.CertificateChain.AskCert == nil || att.CertificateChain.ArkCert == nil {
logger.Info(fmt.Sprintf(
"Certificate chain not fully present (ARK present: %t, ASK present: %t), falling back to retrieving it from AMD KDS",
@@ -223,7 +225,7 @@ func (a *InstanceInfo) ParseCertChain() (ask, ark *x509.Certificate, retErr erro
switch {
case i == 1:
- retErr = fmt.Errorf("no PEM blocks found")
+ retErr = errNoPemBlocks
case len(rest) != 0:
retErr = fmt.Errorf("remaining PEM block is not a valid certificate: %s", rest)
}
diff --git a/internal/attestation/snp/snp_test.go b/internal/attestation/snp/snp_test.go
index 0179ac05b..366a3ba4a 100644
--- a/internal/attestation/snp/snp_test.go
+++ b/internal/attestation/snp/snp_test.go
@@ -9,6 +9,7 @@ package snp
import (
"crypto/x509"
"encoding/hex"
+ "errors"
"fmt"
"regexp"
"strings"
@@ -34,16 +35,13 @@ func TestParseCertChain(t *testing.T) {
wantAsk bool
wantArk bool
wantErr bool
+ errTarget error
}{
"success": {
certChain: defaultCertChain,
wantAsk: true,
wantArk: true,
},
- "empty cert chain": {
- certChain: []byte{},
- wantErr: true,
- },
"more than two certificates": {
certChain: append(defaultCertChain, defaultCertChain...),
wantErr: true,
@@ -52,6 +50,11 @@ func TestParseCertChain(t *testing.T) {
certChain: []byte("invalid"),
wantErr: true,
},
+ "empty cert chain": {
+ certChain: []byte{},
+ wantErr: true,
+ errTarget: errNoPemBlocks,
+ },
"ark missing": {
certChain: []byte(askOnly),
wantAsk: true,
@@ -73,6 +76,9 @@ func TestParseCertChain(t *testing.T) {
ask, ark, err := instanceInfo.ParseCertChain()
if tc.wantErr {
assert.Error(err)
+ if tc.errTarget != nil {
+ assert.True(errors.Is(err, tc.errTarget))
+ }
} else {
assert.NoError(err)
assert.Equal(tc.wantAsk, ask != nil)
@@ -149,12 +155,24 @@ func TestAttestationWithCerts(t *testing.T) {
wantErr bool
}{
"success": {
+ report: defaultReport,
+ idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
+ reportSigner: testdata.AzureThimVCEK,
+ certChain: testdata.CertChain,
+ fallbackCerts: CertificateChain{ark: testdataArk},
+ expectedArk: testdataArk,
+ expectedAsk: testdataAsk,
+ getter: newStubHTTPSGetter(&urlResponseMatcher{}, nil),
+ },
+ "ark only in pre-fetched cert-chain": {
report: defaultReport,
idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
reportSigner: testdata.AzureThimVCEK,
certChain: testdata.CertChain,
expectedArk: testdataArk,
expectedAsk: testdataAsk,
+ getter: newStubHTTPSGetter(nil, assert.AnError),
+ wantErr: true,
},
"vlek success": {
report: vlekReport,
@@ -173,9 +191,10 @@ func TestAttestationWithCerts(t *testing.T) {
),
},
"retrieve vcek": {
- report: defaultReport,
- idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
- certChain: testdata.CertChain,
+ report: defaultReport,
+ idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
+ certChain: testdata.CertChain,
+ fallbackCerts: CertificateChain{ark: testdataArk},
getter: newStubHTTPSGetter(
&urlResponseMatcher{
vcekResponse: testdata.AmdKdsVCEK,
@@ -205,25 +224,9 @@ func TestAttestationWithCerts(t *testing.T) {
idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
reportSigner: testdata.AzureThimVCEK,
fallbackCerts: NewCertificateChain(exampleCert, exampleCert),
- getter: newStubHTTPSGetter(
- &urlResponseMatcher{},
- nil,
- ),
- expectedArk: exampleCert,
- expectedAsk: exampleCert,
- },
- "use certchain with fallback certs": {
- report: defaultReport,
- idkeydigest: "57e229e0ffe5fa92d0faddff6cae0e61c926fc9ef9afd20a8b8cfcf7129db9338cbe5bf3f6987733a2bf65d06dc38fc1",
- certChain: testdata.CertChain,
- reportSigner: testdata.AzureThimVCEK,
- fallbackCerts: NewCertificateChain(&x509.Certificate{}, &x509.Certificate{}),
- getter: newStubHTTPSGetter(
- &urlResponseMatcher{},
- nil,
- ),
- expectedArk: testdataArk,
- expectedAsk: testdataAsk,
+ getter: newStubHTTPSGetter(&urlResponseMatcher{}, nil),
+ expectedArk: exampleCert,
+ expectedAsk: exampleCert,
},
"retrieve vcek and certchain": {
report: defaultReport,
@@ -242,10 +245,12 @@ func TestAttestationWithCerts(t *testing.T) {
},
"report too short": {
report: defaultReport[:len(defaultReport)-100],
+ getter: newStubHTTPSGetter(nil, assert.AnError),
wantErr: true,
},
"corrupted report": {
report: defaultReport[10 : len(defaultReport)-10],
+ getter: newStubHTTPSGetter(nil, assert.AnError),
wantErr: true,
},
"certificate fetch error": {
diff --git a/internal/cloud/openstack/clouds/BUILD.bazel b/internal/cloud/openstack/clouds/BUILD.bazel
new file mode 100644
index 000000000..153bed763
--- /dev/null
+++ b/internal/cloud/openstack/clouds/BUILD.bazel
@@ -0,0 +1,15 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "clouds",
+ srcs = [
+ "clouds.go",
+ "read.go",
+ ],
+ importpath = "github.com/edgelesssys/constellation/v2/internal/cloud/openstack/clouds",
+ visibility = ["//:__subpackages__"],
+ deps = [
+ "//internal/file",
+ "@com_github_mitchellh_go_homedir//:go-homedir",
+ ],
+)
diff --git a/internal/cloud/openstack/clouds/LICENSE b/internal/cloud/openstack/clouds/LICENSE
new file mode 100644
index 000000000..b1da7201f
--- /dev/null
+++ b/internal/cloud/openstack/clouds/LICENSE
@@ -0,0 +1,193 @@
+Copyright 2012-2013 Rackspace, Inc.
+Copyright Gophercloud authors
+Copyright (c) Edgeless Systems GmbH
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/internal/cloud/openstack/clouds/clouds.go b/internal/cloud/openstack/clouds/clouds.go
new file mode 100644
index 000000000..923325fe2
--- /dev/null
+++ b/internal/cloud/openstack/clouds/clouds.go
@@ -0,0 +1,208 @@
+/*
+Copyright 2012-2013 Rackspace, Inc.
+Copyright Gophercloud authors
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: Apache-2.0
+*/
+package clouds
+
+import "encoding/json"
+
+// Clouds represents a collection of Cloud entries in a clouds.yaml file.
+type Clouds struct {
+ Clouds map[string]Cloud `yaml:"clouds" json:"clouds"`
+}
+
+// Cloud represents an entry in a clouds.yaml/public-clouds.yaml/secure.yaml file.
+type Cloud struct {
+ Cloud string `yaml:"cloud,omitempty" json:"cloud,omitempty"`
+ Profile string `yaml:"profile,omitempty" json:"profile,omitempty"`
+ AuthInfo *AuthInfo `yaml:"auth,omitempty" json:"auth,omitempty"`
+ AuthType AuthType `yaml:"auth_type,omitempty" json:"auth_type,omitempty"`
+ RegionName string `yaml:"region_name,omitempty" json:"region_name,omitempty"`
+ Regions []Region `yaml:"regions,omitempty" json:"regions,omitempty"`
+
+ // EndpointType and Interface both specify whether to use the public, internal,
+ // or admin interface of a service. They should be considered synonymous, but
+ // EndpointType will take precedence when both are specified.
+ EndpointType string `yaml:"endpoint_type,omitempty" json:"endpoint_type,omitempty"`
+ Interface string `yaml:"interface,omitempty" json:"interface,omitempty"`
+
+ // API Version overrides.
+ IdentityAPIVersion string `yaml:"identity_api_version,omitempty" json:"identity_api_version,omitempty"`
+ VolumeAPIVersion string `yaml:"volume_api_version,omitempty" json:"volume_api_version,omitempty"`
+
+ // Verify whether or not SSL API requests should be verified.
+ Verify *bool `yaml:"verify,omitempty" json:"verify,omitempty"`
+
+ // CACertFile a path to a CA Cert bundle that can be used as part of
+ // verifying SSL API requests.
+ CACertFile string `yaml:"cacert,omitempty" json:"cacert,omitempty"`
+
+ // ClientCertFile a path to a client certificate to use as part of the SSL
+ // transaction.
+ ClientCertFile string `yaml:"cert,omitempty" json:"cert,omitempty"`
+
+ // ClientKeyFile a path to a client key to use as part of the SSL
+ // transaction.
+ ClientKeyFile string `yaml:"key,omitempty" json:"key,omitempty"`
+}
+
+// AuthInfo represents the auth section of a cloud entry or
+// auth options entered explicitly in ClientOpts.
+type AuthInfo struct {
+ // AuthURL is the keystone/identity endpoint URL.
+ AuthURL string `yaml:"auth_url,omitempty" json:"auth_url,omitempty"`
+
+ // Token is a pre-generated authentication token.
+ Token string `yaml:"token,omitempty" json:"token,omitempty"`
+
+ // Username is the username of the user.
+ Username string `yaml:"username,omitempty" json:"username,omitempty"`
+
+ // UserID is the unique ID of a user.
+ UserID string `yaml:"user_id,omitempty" json:"user_id,omitempty"`
+
+ // Password is the password of the user.
+ Password string `yaml:"password,omitempty" json:"password,omitempty"`
+
+ // Application Credential ID to login with.
+ ApplicationCredentialID string `yaml:"application_credential_id,omitempty" json:"application_credential_id,omitempty"`
+
+ // Application Credential name to login with.
+ ApplicationCredentialName string `yaml:"application_credential_name,omitempty" json:"application_credential_name,omitempty"`
+
+ // Application Credential secret to login with.
+ ApplicationCredentialSecret string `yaml:"application_credential_secret,omitempty" json:"application_credential_secret,omitempty"`
+
+ // SystemScope is a system information to scope to.
+ SystemScope string `yaml:"system_scope,omitempty" json:"system_scope,omitempty"`
+
+ // ProjectName is the common/human-readable name of a project.
+ // Users can be scoped to a project.
+ // ProjectName on its own is not enough to ensure a unique scope. It must
+ // also be combined with either a ProjectDomainName or ProjectDomainID.
+ // ProjectName cannot be combined with ProjectID in a scope.
+ ProjectName string `yaml:"project_name,omitempty" json:"project_name,omitempty"`
+
+ // ProjectID is the unique ID of a project.
+ // It can be used to scope a user to a specific project.
+ ProjectID string `yaml:"project_id,omitempty" json:"project_id,omitempty"`
+
+ // UserDomainName is the name of the domain where a user resides.
+ // It is used to identify the source domain of a user.
+ UserDomainName string `yaml:"user_domain_name,omitempty" json:"user_domain_name,omitempty"`
+
+ // UserDomainID is the unique ID of the domain where a user resides.
+ // It is used to identify the source domain of a user.
+ UserDomainID string `yaml:"user_domain_id,omitempty" json:"user_domain_id,omitempty"`
+
+ // ProjectDomainName is the name of the domain where a project resides.
+ // It is used to identify the source domain of a project.
+ // ProjectDomainName can be used in addition to a ProjectName when scoping
+ // a user to a specific project.
+ ProjectDomainName string `yaml:"project_domain_name,omitempty" json:"project_domain_name,omitempty"`
+
+ // ProjectDomainID is the name of the domain where a project resides.
+ // It is used to identify the source domain of a project.
+ // ProjectDomainID can be used in addition to a ProjectName when scoping
+ // a user to a specific project.
+ ProjectDomainID string `yaml:"project_domain_id,omitempty" json:"project_domain_id,omitempty"`
+
+ // DomainName is the name of a domain which can be used to identify the
+ // source domain of either a user or a project.
+ // If UserDomainName and ProjectDomainName are not specified, then DomainName
+ // is used as a default choice.
+ // It can also be used to specify a domain-only scope.
+ DomainName string `yaml:"domain_name,omitempty" json:"domain_name,omitempty"`
+
+ // DomainID is the unique ID of a domain which can be used to identify the
+ // source domain of either a user or a project.
+ // If UserDomainID and ProjectDomainID are not specified, then DomainID is
+ // used as a default choice.
+ // It can also be used to specify a domain-only scope.
+ DomainID string `yaml:"domain_id,omitempty" json:"domain_id,omitempty"`
+
+ // DefaultDomain is the domain ID to fall back on if no other domain has
+ // been specified and a domain is required for scope.
+ DefaultDomain string `yaml:"default_domain,omitempty" json:"default_domain,omitempty"`
+
+ // AllowReauth should be set to true if you grant permission for Gophercloud to
+ // cache your credentials in memory, and to allow Gophercloud to attempt to
+ // re-authenticate automatically if/when your token expires. If you set it to
+ // false, it will not cache these settings, but re-authentication will not be
+ // possible. This setting defaults to false.
+ AllowReauth bool `yaml:"allow_reauth,omitempty" json:"allow_reauth,omitempty"`
+}
+
+// Region represents a region included as part of cloud in clouds.yaml
+// According to Python-based openstacksdk, this can be either a struct (as defined)
+// or a plain string. Custom unmarshallers handle both cases.
+type Region struct {
+ Name string `yaml:"name,omitempty" json:"name,omitempty"`
+ Values Cloud `yaml:"values,omitempty" json:"values,omitempty"`
+}
+
+// UnmarshalJSON handles either a plain string acting as the Name property or
+// a struct, mimicking the Python-based openstacksdk.
+func (r *Region) UnmarshalJSON(data []byte) error {
+ var name string
+ if err := json.Unmarshal(data, &name); err == nil {
+ r.Name = name
+ return nil
+ }
+
+ type region Region
+ var tmp region
+ if err := json.Unmarshal(data, &tmp); err != nil {
+ return err
+ }
+ r.Name = tmp.Name
+ r.Values = tmp.Values
+
+ return nil
+}
+
+// UnmarshalYAML handles either a plain string acting as the Name property or
+// a struct, mimicking the Python-based openstacksdk.
+func (r *Region) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var name string
+ if err := unmarshal(&name); err == nil {
+ r.Name = name
+ return nil
+ }
+
+ type region Region
+ var tmp region
+ if err := unmarshal(&tmp); err != nil {
+ return err
+ }
+ r.Name = tmp.Name
+ r.Values = tmp.Values
+
+ return nil
+}
+
+// AuthType represents a valid method of authentication.
+type AuthType string
+
+const (
+ // AuthPassword defines an unknown version of the password.
+ AuthPassword AuthType = "password"
+ // AuthToken defines an unknown version of the token.
+ AuthToken AuthType = "token"
+
+ // AuthV2Password defines version 2 of the password.
+ AuthV2Password AuthType = "v2password"
+ // AuthV2Token defines version 2 of the token.
+ AuthV2Token AuthType = "v2token"
+
+ // AuthV3Password defines version 3 of the password.
+ AuthV3Password AuthType = "v3password"
+ // AuthV3Token defines version 3 of the token.
+ AuthV3Token AuthType = "v3token"
+
+ // AuthV3ApplicationCredential defines version 3 of the application credential.
+ AuthV3ApplicationCredential AuthType = "v3applicationcredential"
+)
diff --git a/internal/cloud/openstack/clouds/read.go b/internal/cloud/openstack/clouds/read.go
new file mode 100644
index 000000000..d4259c338
--- /dev/null
+++ b/internal/cloud/openstack/clouds/read.go
@@ -0,0 +1,59 @@
+/*
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: AGPL-3.0-only
+*/
+package clouds
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/mitchellh/go-homedir"
+
+ "github.com/edgelesssys/constellation/v2/internal/file"
+)
+
+// ReadCloudsYAML reads a clouds.yaml file and returns its contents.
+func ReadCloudsYAML(fileHandler file.Handler, path string) (Clouds, error) {
+ // Order of operations as performed by the OpenStack CLI:
+
+ // Define a search path for clouds.yaml:
+ // 1. If OS_CLIENT_CONFIG_FILE is set, use it as search path
+ // 2. Otherwise, use the following paths:
+ // - current directory
+ // - `openstack` directory under standard user config directory (e.g. ~/.config/openstack)
+ // - /etc/openstack (Unix only)
+
+ var searchPaths []string
+ if path != "" {
+ expanded, err := homedir.Expand(path)
+ if err == nil {
+ searchPaths = append(searchPaths, expanded)
+ } else {
+ searchPaths = append(searchPaths, path)
+ }
+ } else if osClientConfigFile := os.Getenv("OS_CLIENT_CONFIG_FILE"); osClientConfigFile != "" {
+ searchPaths = append(searchPaths, filepath.Join(osClientConfigFile, "clouds.yaml"))
+ } else {
+ searchPaths = append(searchPaths, "clouds.yaml")
+ confDir, err := os.UserConfigDir()
+ if err != nil {
+ return Clouds{}, fmt.Errorf("getting user config directory: %w", err)
+ }
+ searchPaths = append(searchPaths, filepath.Join(confDir, "openstack", "clouds.yaml"))
+ if os.PathSeparator == '/' {
+ searchPaths = append(searchPaths, "/etc/openstack/clouds.yaml")
+ }
+ }
+
+ var cloudsYAML Clouds
+ for _, path := range searchPaths {
+ if err := fileHandler.ReadYAML(path, &cloudsYAML); err == nil {
+ return cloudsYAML, nil
+ }
+ }
+
+ return Clouds{}, fmt.Errorf("clouds.yaml not found in search paths: %v", searchPaths)
+}
diff --git a/internal/cloud/openstack/imds.go b/internal/cloud/openstack/imds.go
index c9e3332c8..792a0d881 100644
--- a/internal/cloud/openstack/imds.go
+++ b/internal/cloud/openstack/imds.go
@@ -23,6 +23,7 @@ import (
const (
imdsMetaDataURL = "http://169.254.169.254/openstack/2018-08-27/meta_data.json"
+ imdsUserDataURL = "http://169.254.169.254/openstack/2018-08-27/user_data"
ec2ImdsBaseURL = "http://169.254.169.254/1.0/meta-data"
maxCacheAge = 12 * time.Hour
)
@@ -33,6 +34,7 @@ type imdsClient struct {
vpcIPCache string
vpcIPCacheTime time.Time
cache metadataResponse
+ userDataCache userDataResponse
cacheTime time.Time
}
@@ -129,73 +131,73 @@ func (c *imdsClient) role(ctx context.Context) (role.Role, error) {
}
func (c *imdsClient) loadBalancerEndpoint(ctx context.Context) (string, error) {
- if c.timeForUpdate(c.cacheTime) || c.cache.Tags.LoadBalancerEndpoint == "" {
+ if c.timeForUpdate(c.cacheTime) || c.userDataCache.LoadBalancerEndpoint == "" {
if err := c.update(ctx); err != nil {
return "", err
}
}
- if c.cache.Tags.LoadBalancerEndpoint == "" {
+ if c.userDataCache.LoadBalancerEndpoint == "" {
return "", errors.New("unable to get load balancer endpoint")
}
- return c.cache.Tags.LoadBalancerEndpoint, nil
+ return c.userDataCache.LoadBalancerEndpoint, nil
}
func (c *imdsClient) authURL(ctx context.Context) (string, error) {
- if c.timeForUpdate(c.cacheTime) || c.cache.Tags.AuthURL == "" {
+ if c.timeForUpdate(c.cacheTime) || c.userDataCache.AuthURL == "" {
if err := c.update(ctx); err != nil {
return "", err
}
}
- if c.cache.Tags.AuthURL == "" {
+ if c.userDataCache.AuthURL == "" {
return "", errors.New("unable to get auth url")
}
- return c.cache.Tags.AuthURL, nil
+ return c.userDataCache.AuthURL, nil
}
func (c *imdsClient) userDomainName(ctx context.Context) (string, error) {
- if c.timeForUpdate(c.cacheTime) || c.cache.Tags.UserDomainName == "" {
+ if c.timeForUpdate(c.cacheTime) || c.userDataCache.UserDomainName == "" {
if err := c.update(ctx); err != nil {
return "", err
}
}
- if c.cache.Tags.UserDomainName == "" {
+ if c.userDataCache.UserDomainName == "" {
return "", errors.New("unable to get user domain name")
}
- return c.cache.Tags.UserDomainName, nil
+ return c.userDataCache.UserDomainName, nil
}
func (c *imdsClient) username(ctx context.Context) (string, error) {
- if c.timeForUpdate(c.cacheTime) || c.cache.Tags.Username == "" {
+ if c.timeForUpdate(c.cacheTime) || c.userDataCache.Username == "" {
if err := c.update(ctx); err != nil {
return "", err
}
}
- if c.cache.Tags.Username == "" {
+ if c.userDataCache.Username == "" {
return "", errors.New("unable to get token name")
}
- return c.cache.Tags.Username, nil
+ return c.userDataCache.Username, nil
}
func (c *imdsClient) password(ctx context.Context) (string, error) {
- if c.timeForUpdate(c.cacheTime) || c.cache.Tags.Password == "" {
+ if c.timeForUpdate(c.cacheTime) || c.userDataCache.Password == "" {
if err := c.update(ctx); err != nil {
return "", err
}
}
- if c.cache.Tags.Password == "" {
+ if c.userDataCache.Password == "" {
return "", errors.New("unable to get token password")
}
- return c.cache.Tags.Password, nil
+ return c.userDataCache.Password, nil
}
// timeForUpdate checks whether an update is needed due to cache age.
@@ -203,18 +205,41 @@ func (c *imdsClient) timeForUpdate(t time.Time) bool {
return time.Since(t) > maxCacheAge
}
-// update updates instance metadata from the azure imds API.
func (c *imdsClient) update(ctx context.Context) error {
+ if err := c.updateInstanceMetadata(ctx); err != nil {
+ return fmt.Errorf("updating instance metadata: %w", err)
+ }
+ if err := c.updateUserData(ctx); err != nil {
+ return fmt.Errorf("updating user data: %w", err)
+ }
+ c.cacheTime = time.Now()
+ return nil
+}
+
+// updateInstanceMetadata updates instance metadata from the OpenStack IMDS API.
+func (c *imdsClient) updateInstanceMetadata(ctx context.Context) error {
resp, err := httpGet(ctx, c.client, imdsMetaDataURL)
if err != nil {
return err
}
var metadataResp metadataResponse
if err := json.Unmarshal(resp, &metadataResp); err != nil {
- return err
+ return fmt.Errorf("unmarshalling IMDS metadata response %q: %w", resp, err)
}
c.cache = metadataResp
- c.cacheTime = time.Now()
+ return nil
+}
+
+func (c *imdsClient) updateUserData(ctx context.Context) error {
+ resp, err := httpGet(ctx, c.client, imdsUserDataURL)
+ if err != nil {
+ return err
+ }
+ var userdataResp userDataResponse
+ if err := json.Unmarshal(resp, &userdataResp); err != nil {
+ return fmt.Errorf("unmarshalling IMDS user_data response %q: %w", resp, err)
+ }
+ c.userDataCache = userdataResp
return nil
}
@@ -244,7 +269,10 @@ func httpGet(ctx context.Context, c httpClient, url string) ([]byte, error) {
}
resp, err := c.Do(req)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("querying the OpenStack IMDS api failed for %q: %w", url, err)
+ }
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ return nil, fmt.Errorf("IMDS api might be broken for this server. Recreate the cluster if this issue persists. Querying the OpenStack IMDS api failed for %q with error code %d", url, resp.StatusCode)
}
defer resp.Body.Close()
return io.ReadAll(resp.Body)
@@ -259,9 +287,12 @@ type metadataResponse struct {
}
type metadataTags struct {
- InitSecretHash string `json:"constellation-init-secret-hash,omitempty"`
- Role string `json:"constellation-role,omitempty"`
- UID string `json:"constellation-uid,omitempty"`
+ InitSecretHash string `json:"constellation-init-secret-hash,omitempty"`
+ Role string `json:"constellation-role,omitempty"`
+ UID string `json:"constellation-uid,omitempty"`
+}
+
+type userDataResponse struct {
AuthURL string `json:"openstack-auth-url,omitempty"`
UserDomainName string `json:"openstack-user-domain-name,omitempty"`
Username string `json:"openstack-username,omitempty"`
diff --git a/internal/cloud/openstack/imds_test.go b/internal/cloud/openstack/imds_test.go
index 57430bb8c..ce45dbd3d 100644
--- a/internal/cloud/openstack/imds_test.go
+++ b/internal/cloud/openstack/imds_test.go
@@ -26,7 +26,7 @@ func TestProviderID(t *testing.T) {
someErr := errors.New("failed")
type testCase struct {
- cache metadataResponse
+ cache any
cacheTime time.Time
newClient httpClientJSONCreateFunc
wantResult string
@@ -34,7 +34,7 @@ func TestProviderID(t *testing.T) {
wantErr bool
}
- newTestCases := func(mResp1, mResp2 metadataResponse, expect1, expect2 string) map[string]testCase {
+ newTestCases := func(mResp1, mResp2 any, expect1, expect2 string) map[string]testCase {
return map[string]testCase{
"cached": {
cache: mResp1,
@@ -43,30 +43,30 @@ func TestProviderID(t *testing.T) {
wantCall: false,
},
"from http": {
- newClient: newStubHTTPClientJSONFunc(mResp1, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp1, 200, nil),
wantResult: expect1,
wantCall: true,
},
"cache outdated": {
cache: mResp1,
cacheTime: time.Now().AddDate(0, 0, -1),
- newClient: newStubHTTPClientJSONFunc(mResp2, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp2, 200, nil),
wantResult: expect2,
wantCall: true,
},
"cache empty": {
cacheTime: time.Now(),
- newClient: newStubHTTPClientJSONFunc(mResp1, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp1, 200, nil),
wantResult: expect1,
wantCall: true,
},
"http error": {
- newClient: newStubHTTPClientJSONFunc(metadataResponse{}, someErr),
+ newClient: newStubHTTPClientJSONFunc(metadataResponse{}, 200, someErr),
wantCall: true,
wantErr: true,
},
"http empty response": {
- newClient: newStubHTTPClientJSONFunc(metadataResponse{}, nil),
+ newClient: newStubHTTPClientJSONFunc(metadataResponse{}, 200, nil),
wantCall: true,
wantErr: true,
},
@@ -120,32 +120,32 @@ func TestProviderID(t *testing.T) {
"authURL": {
method: (*imdsClient).authURL,
testCases: newTestCases(
- metadataResponse{Tags: metadataTags{AuthURL: "authURL1"}},
- metadataResponse{Tags: metadataTags{AuthURL: "authURL2"}},
+ userDataResponse{AuthURL: "authURL1"},
+ userDataResponse{AuthURL: "authURL2"},
"authURL1", "authURL2",
),
},
"userDomainName": {
method: (*imdsClient).userDomainName,
testCases: newTestCases(
- metadataResponse{Tags: metadataTags{UserDomainName: "userDomainName1"}},
- metadataResponse{Tags: metadataTags{UserDomainName: "userDomainName2"}},
+ userDataResponse{UserDomainName: "userDomainName1"},
+ userDataResponse{UserDomainName: "userDomainName2"},
"userDomainName1", "userDomainName2",
),
},
"username": {
method: (*imdsClient).username,
testCases: newTestCases(
- metadataResponse{Tags: metadataTags{Username: "username1"}},
- metadataResponse{Tags: metadataTags{Username: "username2"}},
+ userDataResponse{Username: "username1"},
+ userDataResponse{Username: "username2"},
"username1", "username2",
),
},
"password": {
method: (*imdsClient).password,
testCases: newTestCases(
- metadataResponse{Tags: metadataTags{Password: "password1"}},
- metadataResponse{Tags: metadataTags{Password: "password2"}},
+ userDataResponse{Password: "password1"},
+ userDataResponse{Password: "password2"},
"password1", "password2",
),
},
@@ -162,10 +162,18 @@ func TestProviderID(t *testing.T) {
if tc.newClient != nil {
client = tc.newClient(require)
}
+ var cache metadataResponse
+ var userDataCache userDataResponse
+ if _, ok := tc.cache.(metadataResponse); ok {
+ cache = tc.cache.(metadataResponse)
+ } else if _, ok := tc.cache.(userDataResponse); ok {
+ userDataCache = tc.cache.(userDataResponse)
+ }
imds := &imdsClient{
- client: client,
- cache: tc.cache,
- cacheTime: tc.cacheTime,
+ client: client,
+ cache: cache,
+ userDataCache: userDataCache,
+ cacheTime: tc.cacheTime,
}
result, err := tu.method(imds, context.Background())
@@ -207,30 +215,35 @@ func TestRole(t *testing.T) {
wantCall: false,
},
"from http": {
- newClient: newStubHTTPClientJSONFunc(mResp1, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp1, 200, nil),
wantResult: expect1,
wantCall: true,
},
"cache outdated": {
cache: mResp1,
cacheTime: time.Now().AddDate(0, 0, -1),
- newClient: newStubHTTPClientJSONFunc(mResp2, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp2, 200, nil),
wantResult: expect2,
wantCall: true,
},
"cache empty": {
cacheTime: time.Now(),
- newClient: newStubHTTPClientJSONFunc(mResp1, nil),
+ newClient: newStubHTTPClientJSONFunc(mResp1, 200, nil),
wantResult: expect1,
wantCall: true,
},
"http error": {
- newClient: newStubHTTPClientJSONFunc(metadataResponse{}, someErr),
+ newClient: newStubHTTPClientJSONFunc(metadataResponse{}, 200, someErr),
+ wantCall: true,
+ wantErr: true,
+ },
+ "http status code 500": {
+ newClient: newStubHTTPClientJSONFunc(metadataResponse{}, 500, nil),
wantCall: true,
wantErr: true,
},
"http empty response": {
- newClient: newStubHTTPClientJSONFunc(metadataResponse{}, nil),
+ newClient: newStubHTTPClientJSONFunc(metadataResponse{}, 200, nil),
wantCall: true,
wantErr: true,
},
@@ -368,15 +381,17 @@ type httpClientJSONCreateFunc func(r *require.Assertions) *stubHTTPClientJSON
type stubHTTPClientJSON struct {
require *require.Assertions
- response metadataResponse
+ response any
+ code int
err error
called bool
}
-func newStubHTTPClientJSONFunc(response metadataResponse, err error) httpClientJSONCreateFunc {
+func newStubHTTPClientJSONFunc(response any, statusCode int, err error) httpClientJSONCreateFunc {
return func(r *require.Assertions) *stubHTTPClientJSON {
return &stubHTTPClientJSON{
response: response,
+ code: statusCode,
err: err,
require: r,
}
@@ -387,16 +402,26 @@ func (c *stubHTTPClientJSON) Do(_ *http.Request) (*http.Response, error) {
c.called = true
body, err := json.Marshal(c.response)
c.require.NoError(err)
- return &http.Response{Body: io.NopCloser(bytes.NewReader(body))}, c.err
+ code := 200
+ if c.code != 0 {
+ code = c.code
+ }
+ return &http.Response{StatusCode: code, Status: http.StatusText(code), Body: io.NopCloser(bytes.NewReader(body))}, c.err
}
type stubHTTPClient struct {
response string
+ code int
err error
called bool
}
func (c *stubHTTPClient) Do(_ *http.Request) (*http.Response, error) {
c.called = true
- return &http.Response{Body: io.NopCloser(strings.NewReader(c.response))}, c.err
+ code := 200
+ if c.code != 0 {
+ code = c.code
+ }
+
+ return &http.Response{StatusCode: code, Status: http.StatusText(code), Body: io.NopCloser(strings.NewReader(c.response))}, c.err
}
diff --git a/internal/cloud/openstack/openstack.go b/internal/cloud/openstack/openstack.go
index 2e16014f0..9472b3068 100644
--- a/internal/cloud/openstack/openstack.go
+++ b/internal/cloud/openstack/openstack.go
@@ -28,14 +28,14 @@ const (
microversion = "2.42"
)
-// Cloud is the metadata client for OpenStack.
-type Cloud struct {
+// MetadataClient is the metadata client for OpenStack.
+type MetadataClient struct {
api serversAPI
imds imdsAPI
}
// New creates a new OpenStack metadata client.
-func New(ctx context.Context) (*Cloud, error) {
+func New(ctx context.Context) (*MetadataClient, error) {
imds := &imdsClient{client: &http.Client{}}
authURL, err := imds.authURL(ctx)
@@ -77,7 +77,7 @@ func New(ctx context.Context) (*Cloud, error) {
}
networksClient.Microversion = microversion
- return &Cloud{
+ return &MetadataClient{
imds: imds,
api: &apiClient{
servers: serversClient,
@@ -87,7 +87,7 @@ func New(ctx context.Context) (*Cloud, error) {
}
// Self returns the metadata of the current instance.
-func (c *Cloud) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
+func (c *MetadataClient) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
name, err := c.imds.name(ctx)
if err != nil {
return metadata.InstanceMetadata{}, fmt.Errorf("getting name: %w", err)
@@ -114,7 +114,7 @@ func (c *Cloud) Self(ctx context.Context) (metadata.InstanceMetadata, error) {
}
// List returns the metadata of all instances belonging to the same Constellation cluster.
-func (c *Cloud) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
+func (c *MetadataClient) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
uid, err := c.imds.uid(ctx)
if err != nil {
return nil, fmt.Errorf("getting uid: %w", err)
@@ -211,7 +211,7 @@ func (c *Cloud) List(ctx context.Context) ([]metadata.InstanceMetadata, error) {
}
// UID retrieves the UID of the constellation.
-func (c *Cloud) UID(ctx context.Context) (string, error) {
+func (c *MetadataClient) UID(ctx context.Context) (string, error) {
uid, err := c.imds.uid(ctx)
if err != nil {
return "", fmt.Errorf("retrieving instance UID: %w", err)
@@ -220,7 +220,7 @@ func (c *Cloud) UID(ctx context.Context) (string, error) {
}
// InitSecretHash retrieves the InitSecretHash of the current instance.
-func (c *Cloud) InitSecretHash(ctx context.Context) ([]byte, error) {
+func (c *MetadataClient) InitSecretHash(ctx context.Context) ([]byte, error) {
initSecretHash, err := c.imds.initSecretHash(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving init secret hash: %w", err)
@@ -232,7 +232,7 @@ func (c *Cloud) InitSecretHash(ctx context.Context) ([]byte, error) {
// For OpenStack, the load balancer is a floating ip attached to
// a control plane node.
// TODO(malt3): Rewrite to use real load balancer once it is available.
-func (c *Cloud) GetLoadBalancerEndpoint(ctx context.Context) (host, port string, err error) {
+func (c *MetadataClient) GetLoadBalancerEndpoint(ctx context.Context) (host, port string, err error) {
host, err = c.imds.loadBalancerEndpoint(ctx)
if err != nil {
return "", "", fmt.Errorf("getting load balancer endpoint: %w", err)
@@ -240,7 +240,7 @@ func (c *Cloud) GetLoadBalancerEndpoint(ctx context.Context) (host, port string,
return host, strconv.FormatInt(constants.KubernetesPort, 10), nil
}
-func (c *Cloud) getSubnetCIDR(uidTag string) (netip.Prefix, error) {
+func (c *MetadataClient) getSubnetCIDR(uidTag string) (netip.Prefix, error) {
listNetworksOpts := networks.ListOpts{Tags: uidTag}
networksPage, err := c.api.ListNetworks(listNetworksOpts).AllPages()
if err != nil {
@@ -285,7 +285,7 @@ func (c *Cloud) getSubnetCIDR(uidTag string) (netip.Prefix, error) {
return cidr, nil
}
-func (c *Cloud) getServers(uidTag string) ([]servers.Server, error) {
+func (c *MetadataClient) getServers(uidTag string) ([]servers.Server, error) {
listServersOpts := servers.ListOpts{Tags: uidTag}
serversPage, err := c.api.ListServers(listServersOpts).AllPages()
if err != nil {
diff --git a/internal/cloud/openstack/openstack_test.go b/internal/cloud/openstack/openstack_test.go
index 88e9ff7fd..da8ed9d6b 100644
--- a/internal/cloud/openstack/openstack_test.go
+++ b/internal/cloud/openstack/openstack_test.go
@@ -86,7 +86,7 @@ func TestSelf(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
- c := &Cloud{imds: tc.imds}
+ c := &MetadataClient{imds: tc.imds}
got, err := c.Self(context.Background())
@@ -382,7 +382,7 @@ func TestList(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
- c := &Cloud{imds: tc.imds, api: tc.api}
+ c := &MetadataClient{imds: tc.imds, api: tc.api}
got, err := c.List(context.Background())
@@ -416,7 +416,7 @@ func TestUID(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
- c := &Cloud{imds: tc.imds}
+ c := &MetadataClient{imds: tc.imds}
got, err := c.UID(context.Background())
@@ -450,7 +450,7 @@ func TestInitSecretHash(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
- c := &Cloud{imds: tc.imds}
+ c := &MetadataClient{imds: tc.imds}
got, err := c.InitSecretHash(context.Background())
@@ -484,7 +484,7 @@ func TestGetLoadBalancerEndpoint(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
- c := &Cloud{imds: tc.imds}
+ c := &MetadataClient{imds: tc.imds}
got, _, err := c.GetLoadBalancerEndpoint(context.Background())
diff --git a/internal/config/azure.go b/internal/config/azure.go
index 2b1f29a03..79a414f53 100644
--- a/internal/config/azure.go
+++ b/internal/config/azure.go
@@ -146,8 +146,10 @@ func DefaultForAzureTDX() *AzureTDX {
PCESVN: 0,
TEETCBSVN: encoding.HexBytes{0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
QEVendorID: encoding.HexBytes{0x93, 0x9a, 0x72, 0x33, 0xf7, 0x9c, 0x4c, 0xa9, 0x94, 0x0a, 0x0d, 0xb3, 0x95, 0x7f, 0x06, 0x07},
- MRSeam: encoding.HexBytes{0x36, 0x03, 0x04, 0xd3, 0x4a, 0x16, 0xaa, 0xce, 0x0a, 0x18, 0xe0, 0x9a, 0xd2, 0xd0, 0x7d, 0x2b, 0x9f, 0xd3, 0xc1, 0x74, 0x37, 0x8e, 0x5b, 0xf1, 0x08, 0x38, 0x80, 0x79, 0x82, 0x7f, 0x89, 0xff, 0x62, 0xac, 0xc5, 0xf8, 0xc4, 0x73, 0xdd, 0x40, 0x70, 0x63, 0x24, 0x83, 0x4e, 0x20, 0x29, 0x46},
- XFAM: encoding.HexBytes{0xe7, 0x18, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00},
+ // Don't set a default for MRSEAM as it effectively prevents upgrading the SEAM module
+ // Quote verification still makes sure the module comes from Intel (through MRSIGNERSEAM), and is not of a lower version than expected
+ // MRSeam: nil,
+ XFAM: encoding.HexBytes{0xe7, 0x18, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00},
IntelRootKey: mustParsePEM(tdxRootPEM),
}
diff --git a/internal/config/config.go b/internal/config/config.go
index 611ccc39f..d044cb815 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -198,40 +198,22 @@ type OpenStackConfig struct {
// OpenStack cloud name to select from "clouds.yaml". Only required if config file for OpenStack is used. Fallback authentication uses environment variables. For details see: https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html.
Cloud string `yaml:"cloud"`
// description: |
+ // Path to OpenStack "clouds.yaml" file. Only required if automatic detection fails.
+ CloudsYAMLPath string `yaml:"cloudsYAMLPath"`
+ // description: |
// Availability zone to place the VMs in. For details see: https://docs.openstack.org/nova/latest/admin/availability-zones.html
AvailabilityZone string `yaml:"availabilityZone" validate:"required"`
// description: |
// Floating IP pool to use for the VMs. For details see: https://docs.openstack.org/ocata/user-guide/cli-manage-ip-addresses.html
FloatingIPPoolID string `yaml:"floatingIPPoolID" validate:"required"`
// description: |
- // AuthURL is the OpenStack Identity endpoint to use inside the cluster.
- AuthURL string `yaml:"authURL" validate:"required"`
- // description: |
- // ProjectID is the ID of the OpenStack project where a user resides.
- ProjectID string `yaml:"projectID" validate:"required"`
- // description: |
// STACKITProjectID is the ID of the STACKIT project where a user resides.
// Only used if cloud is "stackit".
STACKITProjectID string `yaml:"stackitProjectID"`
// description: |
- // ProjectName is the name of the project where a user resides.
- ProjectName string `yaml:"projectName" validate:"required"`
- // description: |
- // UserDomainName is the name of the domain where a user resides.
- UserDomainName string `yaml:"userDomainName" validate:"required"`
- // description: |
- // ProjectDomainName is the name of the domain where a project resides.
- ProjectDomainName string `yaml:"projectDomainName" validate:"required"`
- // description: |
// RegionName is the name of the region to use inside the cluster.
RegionName string `yaml:"regionName" validate:"required"`
// description: |
- // Username to use inside the cluster.
- Username string `yaml:"username" validate:"required"`
- // description: |
- // Password to use inside the cluster. You can instead use the environment variable "CONSTELL_OS_PASSWORD".
- Password string `yaml:"password"`
- // description: |
// Deploy Yawol loadbalancer. For details see: https://github.com/stackitcloud/yawol
DeployYawolLoadBalancer *bool `yaml:"deployYawolLoadBalancer" validate:"required"`
// description: |
@@ -496,11 +478,6 @@ func New(fileHandler file.Handler, name string, fetcher attestationconfigapi.Fet
fmt.Fprintf(os.Stderr, "WARNING: the environment variable %s is no longer used %s", constants.EnvVarAzureClientSecretValue, appRegistrationErrStr)
}
- openstackPassword := os.Getenv(constants.EnvVarOpenStackPassword)
- if openstackPassword != "" && c.Provider.OpenStack != nil {
- c.Provider.OpenStack.Password = openstackPassword
- }
-
return c, c.Validate(force)
}
@@ -720,7 +697,8 @@ func (c *Config) DeployYawolLoadBalancer() bool {
func (c *Config) UseMarketplaceImage() bool {
return (c.Provider.Azure != nil && c.Provider.Azure.UseMarketplaceImage != nil && *c.Provider.Azure.UseMarketplaceImage) ||
(c.Provider.GCP != nil && c.Provider.GCP.UseMarketplaceImage != nil && *c.Provider.GCP.UseMarketplaceImage) ||
- (c.Provider.AWS != nil && c.Provider.AWS.UseMarketplaceImage != nil && *c.Provider.AWS.UseMarketplaceImage)
+ (c.Provider.AWS != nil && c.Provider.AWS.UseMarketplaceImage != nil && *c.Provider.AWS.UseMarketplaceImage) ||
+ (c.Provider.OpenStack != nil && c.Provider.OpenStack.Cloud == "stackit")
}
// Validate checks the config values and returns validation errors.
@@ -899,14 +877,15 @@ func (c *Config) Validate(force bool) error {
// WithOpenStackProviderDefaults fills the default values for the specific OpenStack provider.
// If the provider is not supported or not an OpenStack provider, the config is returned unchanged.
-func (c *Config) WithOpenStackProviderDefaults(openStackProvider string) *Config {
+func (c *Config) WithOpenStackProviderDefaults(csp cloudprovider.Provider, openStackProvider string) *Config {
+ if csp != cloudprovider.OpenStack {
+ return c
+ }
+ c.Attestation.QEMUVTPM = &QEMUVTPM{Measurements: measurements.DefaultsFor(cloudprovider.OpenStack, variant.QEMUVTPM{})}
switch openStackProvider {
case "stackit":
c.Provider.OpenStack.Cloud = "stackit"
c.Provider.OpenStack.FloatingIPPoolID = "970ace5c-458f-484a-a660-0903bcfd91ad"
- c.Provider.OpenStack.AuthURL = "https://keystone.api.iaas.eu01.stackit.cloud/v3"
- c.Provider.OpenStack.UserDomainName = "portal_mvp"
- c.Provider.OpenStack.ProjectDomainName = "portal_mvp"
c.Provider.OpenStack.RegionName = "RegionOne"
c.Provider.OpenStack.DeployYawolLoadBalancer = toPtr(true)
c.Provider.OpenStack.YawolImageID = "bcd6c13e-75d1-4c3f-bf0f-8f83580cc1be"
@@ -1157,7 +1136,7 @@ type AzureTDX struct {
QEVendorID encoding.HexBytes `json:"qeVendorID" yaml:"qeVendorID"`
// description: |
// Expected 48 byte hex-encoded MR_SEAM value.
- MRSeam encoding.HexBytes `json:"mrSeam" yaml:"mrSeam"`
+ MRSeam encoding.HexBytes `json:"mrSeam,omitempty" yaml:"mrSeam,omitempty"`
// description: |
// Expected 8 byte hex-encoded XFAM field.
XFAM encoding.HexBytes `json:"xfam" yaml:"xfam"`
diff --git a/internal/config/config_doc.go b/internal/config/config_doc.go
index 8665922ed..2168b7f98 100644
--- a/internal/config/config_doc.go
+++ b/internal/config/config_doc.go
@@ -276,87 +276,57 @@ func init() {
FieldName: "openstack",
},
}
- OpenStackConfigDoc.Fields = make([]encoder.Doc, 16)
+ OpenStackConfigDoc.Fields = make([]encoder.Doc, 10)
OpenStackConfigDoc.Fields[0].Name = "cloud"
OpenStackConfigDoc.Fields[0].Type = "string"
OpenStackConfigDoc.Fields[0].Note = ""
OpenStackConfigDoc.Fields[0].Description = "OpenStack cloud name to select from \"clouds.yaml\". Only required if config file for OpenStack is used. Fallback authentication uses environment variables. For details see: https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html."
OpenStackConfigDoc.Fields[0].Comments[encoder.LineComment] = "OpenStack cloud name to select from \"clouds.yaml\". Only required if config file for OpenStack is used. Fallback authentication uses environment variables. For details see: https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html."
- OpenStackConfigDoc.Fields[1].Name = "availabilityZone"
+ OpenStackConfigDoc.Fields[1].Name = "cloudsYAMLPath"
OpenStackConfigDoc.Fields[1].Type = "string"
OpenStackConfigDoc.Fields[1].Note = ""
- OpenStackConfigDoc.Fields[1].Description = "Availability zone to place the VMs in. For details see: https://docs.openstack.org/nova/latest/admin/availability-zones.html"
- OpenStackConfigDoc.Fields[1].Comments[encoder.LineComment] = "Availability zone to place the VMs in. For details see: https://docs.openstack.org/nova/latest/admin/availability-zones.html"
- OpenStackConfigDoc.Fields[2].Name = "floatingIPPoolID"
+ OpenStackConfigDoc.Fields[1].Description = "Path to OpenStack \"clouds.yaml\" file. Only required if automatic detection fails."
+ OpenStackConfigDoc.Fields[1].Comments[encoder.LineComment] = "Path to OpenStack \"clouds.yaml\" file. Only required if automatic detection fails."
+ OpenStackConfigDoc.Fields[2].Name = "availabilityZone"
OpenStackConfigDoc.Fields[2].Type = "string"
OpenStackConfigDoc.Fields[2].Note = ""
- OpenStackConfigDoc.Fields[2].Description = "Floating IP pool to use for the VMs. For details see: https://docs.openstack.org/ocata/user-guide/cli-manage-ip-addresses.html"
- OpenStackConfigDoc.Fields[2].Comments[encoder.LineComment] = "Floating IP pool to use for the VMs. For details see: https://docs.openstack.org/ocata/user-guide/cli-manage-ip-addresses.html"
- OpenStackConfigDoc.Fields[3].Name = "authURL"
+ OpenStackConfigDoc.Fields[2].Description = "Availability zone to place the VMs in. For details see: https://docs.openstack.org/nova/latest/admin/availability-zones.html"
+ OpenStackConfigDoc.Fields[2].Comments[encoder.LineComment] = "Availability zone to place the VMs in. For details see: https://docs.openstack.org/nova/latest/admin/availability-zones.html"
+ OpenStackConfigDoc.Fields[3].Name = "floatingIPPoolID"
OpenStackConfigDoc.Fields[3].Type = "string"
OpenStackConfigDoc.Fields[3].Note = ""
- OpenStackConfigDoc.Fields[3].Description = "description: |\nAuthURL is the OpenStack Identity endpoint to use inside the cluster.\n"
- OpenStackConfigDoc.Fields[3].Comments[encoder.LineComment] = "description: |"
- OpenStackConfigDoc.Fields[4].Name = "projectID"
+ OpenStackConfigDoc.Fields[3].Description = "Floating IP pool to use for the VMs. For details see: https://docs.openstack.org/ocata/user-guide/cli-manage-ip-addresses.html"
+ OpenStackConfigDoc.Fields[3].Comments[encoder.LineComment] = "Floating IP pool to use for the VMs. For details see: https://docs.openstack.org/ocata/user-guide/cli-manage-ip-addresses.html"
+ OpenStackConfigDoc.Fields[4].Name = "stackitProjectID"
OpenStackConfigDoc.Fields[4].Type = "string"
OpenStackConfigDoc.Fields[4].Note = ""
- OpenStackConfigDoc.Fields[4].Description = "ProjectID is the ID of the OpenStack project where a user resides."
- OpenStackConfigDoc.Fields[4].Comments[encoder.LineComment] = "ProjectID is the ID of the OpenStack project where a user resides."
- OpenStackConfigDoc.Fields[5].Name = "stackitProjectID"
+ OpenStackConfigDoc.Fields[4].Description = "STACKITProjectID is the ID of the STACKIT project where a user resides.\nOnly used if cloud is \"stackit\"."
+ OpenStackConfigDoc.Fields[4].Comments[encoder.LineComment] = "STACKITProjectID is the ID of the STACKIT project where a user resides."
+ OpenStackConfigDoc.Fields[5].Name = "regionName"
OpenStackConfigDoc.Fields[5].Type = "string"
OpenStackConfigDoc.Fields[5].Note = ""
- OpenStackConfigDoc.Fields[5].Description = "STACKITProjectID is the ID of the STACKIT project where a user resides.\nOnly used if cloud is \"stackit\"."
- OpenStackConfigDoc.Fields[5].Comments[encoder.LineComment] = "STACKITProjectID is the ID of the STACKIT project where a user resides."
- OpenStackConfigDoc.Fields[6].Name = "projectName"
- OpenStackConfigDoc.Fields[6].Type = "string"
+ OpenStackConfigDoc.Fields[5].Description = "description: |\nRegionName is the name of the region to use inside the cluster.\n"
+ OpenStackConfigDoc.Fields[5].Comments[encoder.LineComment] = "description: |"
+ OpenStackConfigDoc.Fields[6].Name = "deployYawolLoadBalancer"
+ OpenStackConfigDoc.Fields[6].Type = "bool"
OpenStackConfigDoc.Fields[6].Note = ""
- OpenStackConfigDoc.Fields[6].Description = "ProjectName is the name of the project where a user resides."
- OpenStackConfigDoc.Fields[6].Comments[encoder.LineComment] = "ProjectName is the name of the project where a user resides."
- OpenStackConfigDoc.Fields[7].Name = "userDomainName"
+ OpenStackConfigDoc.Fields[6].Description = "Deploy Yawol loadbalancer. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[6].Comments[encoder.LineComment] = "Deploy Yawol loadbalancer. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[7].Name = "yawolImageID"
OpenStackConfigDoc.Fields[7].Type = "string"
OpenStackConfigDoc.Fields[7].Note = ""
- OpenStackConfigDoc.Fields[7].Description = "UserDomainName is the name of the domain where a user resides."
- OpenStackConfigDoc.Fields[7].Comments[encoder.LineComment] = "UserDomainName is the name of the domain where a user resides."
- OpenStackConfigDoc.Fields[8].Name = "projectDomainName"
+ OpenStackConfigDoc.Fields[7].Description = "OpenStack OS image used by the yawollet. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[7].Comments[encoder.LineComment] = "OpenStack OS image used by the yawollet. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[8].Name = "yawolFlavorID"
OpenStackConfigDoc.Fields[8].Type = "string"
OpenStackConfigDoc.Fields[8].Note = ""
- OpenStackConfigDoc.Fields[8].Description = "ProjectDomainName is the name of the domain where a project resides."
- OpenStackConfigDoc.Fields[8].Comments[encoder.LineComment] = "ProjectDomainName is the name of the domain where a project resides."
- OpenStackConfigDoc.Fields[9].Name = "regionName"
- OpenStackConfigDoc.Fields[9].Type = "string"
+ OpenStackConfigDoc.Fields[8].Description = "OpenStack flavor id used for yawollets. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[8].Comments[encoder.LineComment] = "OpenStack flavor id used for yawollets. For details see: https://github.com/stackitcloud/yawol"
+ OpenStackConfigDoc.Fields[9].Name = "deployCSIDriver"
+ OpenStackConfigDoc.Fields[9].Type = "bool"
OpenStackConfigDoc.Fields[9].Note = ""
- OpenStackConfigDoc.Fields[9].Description = "description: |\nRegionName is the name of the region to use inside the cluster.\n"
- OpenStackConfigDoc.Fields[9].Comments[encoder.LineComment] = "description: |"
- OpenStackConfigDoc.Fields[10].Name = "username"
- OpenStackConfigDoc.Fields[10].Type = "string"
- OpenStackConfigDoc.Fields[10].Note = ""
- OpenStackConfigDoc.Fields[10].Description = "Username to use inside the cluster."
- OpenStackConfigDoc.Fields[10].Comments[encoder.LineComment] = "Username to use inside the cluster."
- OpenStackConfigDoc.Fields[11].Name = "password"
- OpenStackConfigDoc.Fields[11].Type = "string"
- OpenStackConfigDoc.Fields[11].Note = ""
- OpenStackConfigDoc.Fields[11].Description = "Password to use inside the cluster. You can instead use the environment variable \"CONSTELL_OS_PASSWORD\"."
- OpenStackConfigDoc.Fields[11].Comments[encoder.LineComment] = "Password to use inside the cluster. You can instead use the environment variable \"CONSTELL_OS_PASSWORD\"."
- OpenStackConfigDoc.Fields[12].Name = "deployYawolLoadBalancer"
- OpenStackConfigDoc.Fields[12].Type = "bool"
- OpenStackConfigDoc.Fields[12].Note = ""
- OpenStackConfigDoc.Fields[12].Description = "Deploy Yawol loadbalancer. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[12].Comments[encoder.LineComment] = "Deploy Yawol loadbalancer. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[13].Name = "yawolImageID"
- OpenStackConfigDoc.Fields[13].Type = "string"
- OpenStackConfigDoc.Fields[13].Note = ""
- OpenStackConfigDoc.Fields[13].Description = "OpenStack OS image used by the yawollet. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[13].Comments[encoder.LineComment] = "OpenStack OS image used by the yawollet. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[14].Name = "yawolFlavorID"
- OpenStackConfigDoc.Fields[14].Type = "string"
- OpenStackConfigDoc.Fields[14].Note = ""
- OpenStackConfigDoc.Fields[14].Description = "OpenStack flavor id used for yawollets. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[14].Comments[encoder.LineComment] = "OpenStack flavor id used for yawollets. For details see: https://github.com/stackitcloud/yawol"
- OpenStackConfigDoc.Fields[15].Name = "deployCSIDriver"
- OpenStackConfigDoc.Fields[15].Type = "bool"
- OpenStackConfigDoc.Fields[15].Note = ""
- OpenStackConfigDoc.Fields[15].Description = "Deploy Cinder CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage"
- OpenStackConfigDoc.Fields[15].Comments[encoder.LineComment] = "Deploy Cinder CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage"
+ OpenStackConfigDoc.Fields[9].Description = "Deploy Cinder CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage"
+ OpenStackConfigDoc.Fields[9].Comments[encoder.LineComment] = "Deploy Cinder CSI driver with on-node encryption. For details see: https://docs.edgeless.systems/constellation/architecture/encrypted-storage"
QEMUConfigDoc.Type = "QEMUConfig"
QEMUConfigDoc.Comments[encoder.LineComment] = "QEMUConfig holds config information for QEMU based Constellation deployments."
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index 204ac52ef..013c50edc 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -328,7 +328,7 @@ func TestFromFile(t *testing.T) {
}
func TestValidate(t *testing.T) {
- const defaultErrCount = 38 // expect this number of error messages by default because user-specific values are not set and multiple providers are defined by default
+ const defaultErrCount = 32 // expect this number of error messages by default because user-specific values are not set and multiple providers are defined by default
const azErrCount = 7
const awsErrCount = 8
const gcpErrCount = 8
diff --git a/internal/config/image_enterprise.go b/internal/config/image_enterprise.go
index 6b6efd6ce..45e272957 100644
--- a/internal/config/image_enterprise.go
+++ b/internal/config/image_enterprise.go
@@ -10,5 +10,5 @@ package config
const (
// defaultImage is the default image to use.
- defaultImage = "ref/main/stream/nightly/v2.16.0-pre.0.20240227085922-80518379c44d"
+ defaultImage = "v2.16.4"
)
diff --git a/internal/config/instancetypes/BUILD.bazel b/internal/config/instancetypes/BUILD.bazel
index 7080f5bb8..609892693 100644
--- a/internal/config/instancetypes/BUILD.bazel
+++ b/internal/config/instancetypes/BUILD.bazel
@@ -6,6 +6,7 @@ go_library(
"aws.go",
"azure.go",
"gcp.go",
+ "stackit.go",
],
importpath = "github.com/edgelesssys/constellation/v2/internal/config/instancetypes",
visibility = ["//:__subpackages__"],
diff --git a/internal/config/instancetypes/stackit.go b/internal/config/instancetypes/stackit.go
new file mode 100644
index 000000000..68ea21d94
--- /dev/null
+++ b/internal/config/instancetypes/stackit.go
@@ -0,0 +1,16 @@
+/*
+Copyright (c) Edgeless Systems GmbH
+
+SPDX-License-Identifier: AGPL-3.0-only
+*/
+
+package instancetypes
+
+// STACKITInstanceTypes are valid STACKIT instance types.
+var STACKITInstanceTypes = []string{
+ "m1a.2cd",
+ "m1a.4cd",
+ "m1a.8cd",
+ "m1a.16cd",
+ "m1a.30cd",
+}
diff --git a/internal/config/migration/migration.go b/internal/config/migration/migration.go
index 03bfb5b5c..54ca54335 100644
--- a/internal/config/migration/migration.go
+++ b/internal/config/migration/migration.go
@@ -381,14 +381,7 @@ func V3ToV4(path string, fileHandler file.Handler) error {
Cloud: cfgV3.Provider.OpenStack.Cloud,
AvailabilityZone: cfgV3.Provider.OpenStack.AvailabilityZone,
FloatingIPPoolID: cfgV3.Provider.OpenStack.FloatingIPPoolID,
- AuthURL: cfgV3.Provider.OpenStack.AuthURL,
- ProjectID: cfgV3.Provider.OpenStack.ProjectID,
- ProjectName: cfgV3.Provider.OpenStack.ProjectName,
- UserDomainName: cfgV3.Provider.OpenStack.UserDomainName,
- ProjectDomainName: cfgV3.Provider.OpenStack.ProjectDomainName,
RegionName: cfgV3.Provider.OpenStack.RegionName,
- Username: cfgV3.Provider.OpenStack.Username,
- Password: cfgV3.Provider.OpenStack.Password,
DeployYawolLoadBalancer: cfgV3.Provider.OpenStack.DeployYawolLoadBalancer,
YawolImageID: cfgV3.Provider.OpenStack.YawolImageID,
YawolFlavorID: cfgV3.Provider.OpenStack.YawolFlavorID,
diff --git a/internal/constellation/apply.go b/internal/constellation/apply.go
index bbd61cf8c..c9844b435 100644
--- a/internal/constellation/apply.go
+++ b/internal/constellation/apply.go
@@ -87,7 +87,7 @@ func (a *Applier) SetKubeConfig(kubeConfig []byte) error {
// CheckLicense checks the given Constellation license with the license server
// and returns the allowed quota for the license.
func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider, initRequest bool, licenseID string) (int, error) {
- a.log.Debug(fmt.Sprintf("Contacting license server for license '%s'", licenseID))
+ a.log.Debug(fmt.Sprintf("Contacting license server for license %q", licenseID))
var action license.Action
if initRequest {
@@ -103,7 +103,7 @@ func (a *Applier) CheckLicense(ctx context.Context, csp cloudprovider.Provider,
if err != nil {
return 0, fmt.Errorf("checking license: %w", err)
}
- a.log.Debug(fmt.Sprintf("Got response from license server for license '%s'", licenseID))
+ a.log.Debug(fmt.Sprintf("Got response from license server for license %q", licenseID))
return quota, nil
}
diff --git a/internal/constellation/applyinit.go b/internal/constellation/applyinit.go
index f02c9e8cc..e451e4fd8 100644
--- a/internal/constellation/applyinit.go
+++ b/internal/constellation/applyinit.go
@@ -85,12 +85,12 @@ func (a *Applier) Init(
// Create a wrapper function that allows logging any returned error from the retrier before checking if it's the expected retriable one.
serviceIsUnavailable := func(err error) bool {
isServiceUnavailable := grpcRetry.ServiceIsUnavailable(err)
- a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %s", isServiceUnavailable, err))
+ a.log.Debug(fmt.Sprintf("Encountered error (retriable: %t): %q", isServiceUnavailable, err))
return isServiceUnavailable
}
// Perform the RPC
- a.log.Debug(fmt.Sprintf("Making initialization call, doer is %+v", doer))
+ a.log.Debug("Initialization call", "endpoint", doer.endpoint, "kmsURI", doer.req.KmsUri, "storageURI", doer.req.StorageUri)
a.spinner.Start("Connecting ", false)
retrier := retry.NewIntervalRetrier(doer, 30*time.Second, serviceIsUnavailable)
if err := retrier.Do(ctx); err != nil {
@@ -99,7 +99,7 @@ func (a *Applier) Init(
a.spinner.Stop()
a.log.Debug("Initialization request finished")
- a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %s", state.Infrastructure.ClusterEndpoint))
+ a.log.Debug(fmt.Sprintf("Rewriting cluster server address in kubeconfig to %q", state.Infrastructure.ClusterEndpoint))
kubeconfig, err := clientcmd.Load(doer.resp.Kubeconfig)
if err != nil {
return InitOutput{}, fmt.Errorf("loading kubeconfig: %w", err)
@@ -175,7 +175,7 @@ func (d *initDoer) Do(ctx context.Context) error {
conn, err := d.dialer.Dial(ctx, d.endpoint)
if err != nil {
- d.log.Debug(fmt.Sprintf("Dialing init server failed: %s. Retrying...", err))
+ d.log.Debug(fmt.Sprintf("Dialing init server failed: %q. Retrying...", err))
return fmt.Errorf("dialing init server: %w", err)
}
defer conn.Close()
@@ -200,7 +200,7 @@ func (d *initDoer) Do(ctx context.Context) error {
res, err := resp.Recv() // get first response, either success or failure
if err != nil {
if e := d.getLogs(resp); e != nil {
- d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
+ d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,
@@ -214,7 +214,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.resp = res.GetInitSuccess()
case *initproto.InitResponse_InitFailure:
if e := d.getLogs(resp); e != nil {
- d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %s", e))
+ d.log.Debug(fmt.Sprintf("Failed to get logs from cluster: %q", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: errors.New(res.GetInitFailure().GetError()),
@@ -225,7 +225,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.log.Debug("Cluster returned nil response type")
err = errors.New("empty response from cluster")
if e := d.getLogs(resp); e != nil {
- d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
+ d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,
@@ -236,7 +236,7 @@ func (d *initDoer) Do(ctx context.Context) error {
d.log.Debug("Cluster returned unknown response type")
err = errors.New("unknown response from cluster")
if e := d.getLogs(resp); e != nil {
- d.log.Debug(fmt.Sprintf("Failed to collect logs: %s", e))
+ d.log.Debug(fmt.Sprintf("Failed to collect logs: %q", e))
return &NonRetriableInitError{
LogCollectionErr: e,
Err: err,
diff --git a/internal/constellation/helm.go b/internal/constellation/helm.go
index e8b9a815f..1378ce3a0 100644
--- a/internal/constellation/helm.go
+++ b/internal/constellation/helm.go
@@ -9,7 +9,6 @@ package constellation
import (
"errors"
- "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constellation/helm"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
@@ -17,18 +16,18 @@ import (
// PrepareHelmCharts loads Helm charts for Constellation and returns an executor to apply them.
func (a *Applier) PrepareHelmCharts(
- flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ flags helm.Options, state *state.State, serviceAccURI string, masterSecret uri.MasterSecret,
) (helm.Applier, bool, error) {
if a.helmClient == nil {
return nil, false, errors.New("helm client not initialized")
}
- return a.helmClient.PrepareApply(flags, state, serviceAccURI, masterSecret, openStackCfg)
+ return a.helmClient.PrepareApply(flags, state, serviceAccURI, masterSecret)
}
type helmApplier interface {
PrepareApply(
- flags helm.Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ flags helm.Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret,
) (
helm.Applier, bool, error)
}
diff --git a/internal/constellation/helm/BUILD.bazel b/internal/constellation/helm/BUILD.bazel
index d579dddb9..6e3c5eee7 100644
--- a/internal/constellation/helm/BUILD.bazel
+++ b/internal/constellation/helm/BUILD.bazel
@@ -467,7 +467,6 @@ go_library(
"//internal/cloud/gcpshared",
"//internal/cloud/openstack",
"//internal/compatibility",
- "//internal/config",
"//internal/constants",
"//internal/constellation/helm/imageversion",
"//internal/constellation/state",
diff --git a/internal/constellation/helm/action.go b/internal/constellation/helm/action.go
index 8761b2104..30c1c312d 100644
--- a/internal/constellation/helm/action.go
+++ b/internal/constellation/helm/action.go
@@ -8,8 +8,10 @@ package helm
import (
"context"
+ "errors"
"fmt"
"path/filepath"
+ "strings"
"time"
"github.com/edgelesssys/constellation/v2/internal/constants"
@@ -52,6 +54,12 @@ func newHelmInstallAction(config *action.Configuration, release release, timeout
return action
}
+func newHelmUninstallAction(config *action.Configuration, timeout time.Duration) *action.Uninstall {
+ action := action.NewUninstall(config)
+ action.Timeout = timeout
+ return action
+}
+
func setWaitMode(a *action.Install, waitMode WaitMode) {
switch waitMode {
case WaitModeNone:
@@ -70,11 +78,12 @@ func setWaitMode(a *action.Install, waitMode WaitMode) {
// installAction is an action that installs a helm chart.
type installAction struct {
- preInstall func(context.Context) error
- release release
- helmAction *action.Install
- postInstall func(context.Context) error
- log debugLog
+ preInstall func(context.Context) error
+ release release
+ helmAction *action.Install
+ uninstallAction *action.Uninstall
+ postInstall func(context.Context) error
+ log debugLog
}
// Apply installs the chart.
@@ -103,6 +112,11 @@ func (a *installAction) SaveChart(chartsDir string, fileHandler file.Handler) er
func (a *installAction) apply(ctx context.Context) error {
_, err := a.helmAction.RunWithContext(ctx, a.release.chart, a.release.values)
+ if isUninstallError(err) && a.uninstallAction != nil {
+ a.log.Debug("cleaning up manually after failed atomic Helm install", "error", err, "release", a.release.releaseName)
+ _, uninstallErr := a.uninstallAction.Run(a.release.releaseName)
+ err = errors.Join(err, uninstallErr)
+ }
return err
}
@@ -228,3 +242,8 @@ func helmLog(log debugLog) action.DebugLog {
log.Debug(fmt.Sprintf(format, v...))
}
}
+
+func isUninstallError(err error) bool {
+ return err != nil && (strings.Contains(err.Error(), "an error occurred while uninstalling the release") ||
+ strings.Contains(err.Error(), "cannot re-use a name that is still in use"))
+}
diff --git a/internal/constellation/helm/actionfactory.go b/internal/constellation/helm/actionfactory.go
index 73336f3eb..67ca3ab34 100644
--- a/internal/constellation/helm/actionfactory.go
+++ b/internal/constellation/helm/actionfactory.go
@@ -90,15 +90,15 @@ func (a actionFactory) appendNewAction(
)
}
- a.log.Debug(fmt.Sprintf("release %s not found, adding to new releases...", release.releaseName))
+ a.log.Debug(fmt.Sprintf("release %q not found, adding to new releases...", release.releaseName))
*actions = append(*actions, a.newInstall(release, timeout))
return nil
}
if err != nil {
return fmt.Errorf("getting version for %s: %w", release.releaseName, err)
}
- a.log.Debug(fmt.Sprintf("Current %s version: %s", release.releaseName, currentVersion))
- a.log.Debug(fmt.Sprintf("New %s version: %s", release.releaseName, newVersion))
+ a.log.Debug(fmt.Sprintf("Current %q version: %q", release.releaseName, currentVersion))
+ a.log.Debug(fmt.Sprintf("New %q version: %q", release.releaseName, newVersion))
if !force {
// For charts we package ourselves, the version is equal to the CLI version (charts are embedded in the binary).
@@ -132,13 +132,16 @@ func (a actionFactory) appendNewAction(
release.releaseName == certManagerInfo.releaseName {
return ErrConfirmationMissing
}
- a.log.Debug(fmt.Sprintf("Upgrading %s from %s to %s", release.releaseName, currentVersion, newVersion))
+ a.log.Debug(fmt.Sprintf("Upgrading %q from %q to %q", release.releaseName, currentVersion, newVersion))
*actions = append(*actions, a.newUpgrade(release, timeout))
return nil
}
func (a actionFactory) newInstall(release release, timeout time.Duration) *installAction {
action := &installAction{helmAction: newHelmInstallAction(a.cfg, release, timeout), release: release, log: a.log}
+ if action.IsAtomic() {
+ action.uninstallAction = newHelmUninstallAction(a.cfg, timeout)
+ }
return action
}
@@ -162,7 +165,7 @@ func (a actionFactory) updateCRDs(ctx context.Context, chart *chart.Chart) error
for _, dep := range chart.Dependencies() {
for _, crdFile := range dep.Files {
if strings.HasPrefix(crdFile.Name, "crds/") {
- a.log.Debug(fmt.Sprintf("Updating crd: %s", crdFile.Name))
+ a.log.Debug(fmt.Sprintf("Updating crd: %q", crdFile.Name))
err := a.kubeClient.ApplyCRD(ctx, crdFile.Data)
if err != nil {
return err
diff --git a/internal/constellation/helm/charts/cilium/Chart.yaml b/internal/constellation/helm/charts/cilium/Chart.yaml
index 3f3fc714b..3ba2d273f 100644
--- a/internal/constellation/helm/charts/cilium/Chart.yaml
+++ b/internal/constellation/helm/charts/cilium/Chart.yaml
@@ -2,8 +2,8 @@ apiVersion: v2
name: cilium
displayName: Cilium
home: https://cilium.io/
-version: 1.15.0-pre.3-edg.2
-appVersion: 1.15.0-pre.3-edg.2
+version: 1.15.0-pre.3-edg.3
+appVersion: 1.15.0-pre.3-edg.3
kubeVersion: ">= 1.16.0-0"
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
description: eBPF-based Networking, Security, and Observability
diff --git a/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml b/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml
index f6b493cb7..773a5b26b 100644
--- a/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml
+++ b/internal/constellation/helm/charts/cilium/templates/cilium-agent/daemonset.yaml
@@ -715,6 +715,37 @@ spec:
- name: cni-path
mountPath: /host/opt/cni/bin
{{- end }} # .Values.cni.install
+ - name: firewall-pods
+ image: ghcr.io/edgelesssys/cilium/cilium:v1.15.0-pre.3-edg.2@sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -exc
+ - |
+ pref=32
+ interface=$(ip route | awk '/^default/ { print $5 }')
+ tc qdisc add dev "${interface}" clsact || true
+ tc filter del dev "${interface}" ingress pref "${pref}" 2>/dev/null || true
+ handle=0
+ for cidr in ${POD_CIDRS}; do
+ handle=$((handle + 1))
+ tc filter replace dev "${interface}" ingress pref "${pref}" handle "${handle}" protocol ip flower dst_ip "${cidr}" action drop
+ done
+ env:
+ - name: POD_CIDRS
+ valueFrom:
+ configMapKeyRef:
+ key: encryption-strict-mode-pod-cidrs
+ name: cilium-config
+ optional: true
+ resources:
+ requests:
+ cpu: 100m
+ memory: 20Mi
+ securityContext:
+ capabilities:
+ add:
+ - NET_ADMIN
restartPolicy: Always
priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.priorityClassName "system-node-critical") }}
serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }}
diff --git a/internal/constellation/helm/cilium.patch b/internal/constellation/helm/cilium.patch
index 26d7c3343..cc12f4cb5 100644
--- a/internal/constellation/helm/cilium.patch
+++ b/internal/constellation/helm/cilium.patch
@@ -54,8 +54,50 @@ index 256a79542..3f3fc714b 100644
home: https://cilium.io/
-version: 1.15.0-pre.3
-appVersion: 1.15.0-pre.3
-+version: 1.15.0-pre.3-edg.2
-+appVersion: 1.15.0-pre.3-edg.2
++version: 1.15.0-pre.3-edg.3
++appVersion: 1.15.0-pre.3-edg.3
kubeVersion: ">= 1.16.0-0"
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
description: eBPF-based Networking, Security, and Observability
+diff --git a/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml b/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml
+index f6b493cb7..50b80267a 100644
+--- a/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml
++++ b/install/kubernetes/cilium/templates/cilium-agent/daemonset.yaml
+@@ -715,6 +715,37 @@ spec:
+ - name: cni-path
+ mountPath: /host/opt/cni/bin
+ {{- end }} # .Values.cni.install
++ - name: firewall-pods
++ image: ghcr.io/edgelesssys/cilium/cilium:v1.15.0-pre.3-edg.2@sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749
++ imagePullPolicy: IfNotPresent
++ command:
++ - /bin/bash
++ - -exc
++ - |
++ pref=32
++ interface=$(ip route | awk '/^default/ { print $5 }')
++ tc qdisc add dev "${interface}" clsact || true
++ tc filter del dev "${interface}" ingress pref "${pref}" 2>/dev/null || true
++ handle=0
++ for cidr in ${POD_CIDRS}; do
++ handle=$((handle + 1))
++ tc filter replace dev "${interface}" ingress pref "${pref}" handle "${handle}" protocol ip flower dst_ip "${cidr}" action drop
++ done
++ env:
++ - name: POD_CIDRS
++ valueFrom:
++ configMapKeyRef:
++ key: encryption-strict-mode-pod-cidrs
++ name: cilium-config
++ optional: true
++ resources:
++ requests:
++ cpu: 100m
++ memory: 20Mi
++ securityContext:
++ capabilities:
++ add:
++ - NET_ADMIN
+ restartPolicy: Always
+ priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.priorityClassName "system-node-critical") }}
+ serviceAccount: {{ .Values.serviceAccounts.cilium.name | quote }}
diff --git a/internal/constellation/helm/helm.go b/internal/constellation/helm/helm.go
index ab0438214..dcc994c6c 100644
--- a/internal/constellation/helm/helm.go
+++ b/internal/constellation/helm/helm.go
@@ -35,7 +35,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
- "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
"github.com/edgelesssys/constellation/v2/internal/file"
@@ -91,13 +90,14 @@ type Options struct {
MicroserviceVersion semver.Semver
HelmWaitMode WaitMode
ApplyTimeout time.Duration
+ OpenStackValues *OpenStackValues
}
// PrepareApply loads the charts and returns the executor to apply them.
func (h Client) PrepareApply(
- flags Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret, openStackCfg *config.OpenStackConfig,
+ flags Options, stateFile *state.State, serviceAccURI string, masterSecret uri.MasterSecret,
) (Applier, bool, error) {
- releases, err := h.loadReleases(flags.CSP, flags.AttestationVariant, flags.K8sVersion, masterSecret, stateFile, flags, serviceAccURI, openStackCfg)
+ releases, err := h.loadReleases(masterSecret, stateFile, flags, serviceAccURI)
if err != nil {
return nil, false, fmt.Errorf("loading Helm releases: %w", err)
}
@@ -110,12 +110,11 @@ func (h Client) PrepareApply(
}
func (h Client) loadReleases(
- csp cloudprovider.Provider, attestationVariant variant.Variant, k8sVersion versions.ValidK8sVersion, secret uri.MasterSecret,
- stateFile *state.State, flags Options, serviceAccURI string, openStackCfg *config.OpenStackConfig,
+ secret uri.MasterSecret, stateFile *state.State, flags Options, serviceAccURI string,
) ([]release, error) {
- helmLoader := newLoader(csp, attestationVariant, k8sVersion, stateFile, h.cliVersion)
+ helmLoader := newLoader(flags.CSP, flags.AttestationVariant, flags.K8sVersion, stateFile, h.cliVersion)
h.log.Debug("Created new Helm loader")
- return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, openStackCfg)
+ return helmLoader.loadReleases(flags.Conformance, flags.DeployCSIDriver, flags.HelmWaitMode, secret, serviceAccURI, flags.OpenStackValues)
}
// Applier runs the Helm actions.
diff --git a/internal/constellation/helm/helm_test.go b/internal/constellation/helm/helm_test.go
index aed7689d0..cd8aab6a6 100644
--- a/internal/constellation/helm/helm_test.go
+++ b/internal/constellation/helm/helm_test.go
@@ -198,7 +198,7 @@ func TestHelmApply(t *testing.T) {
if tc.clusterCertManagerVersion != nil {
certManagerVersion = *tc.clusterCertManagerVersion
}
- helmListVersion(lister, "cilium", "v1.15.0-pre.3-edg.2")
+ helmListVersion(lister, "cilium", "v1.15.0-pre.3-edg.3")
helmListVersion(lister, "cert-manager", certManagerVersion)
helmListVersion(lister, "constellation-services", tc.clusterMicroServiceVersion)
helmListVersion(lister, "constellation-operators", tc.clusterMicroServiceVersion)
@@ -217,7 +217,7 @@ func TestHelmApply(t *testing.T) {
SetInfrastructure(state.Infrastructure{UID: "testuid"}).
SetClusterValues(state.ClusterValues{MeasurementSalt: []byte{0x41}}),
fakeServiceAccURI(csp),
- uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")}, nil)
+ uri.MasterSecret{Key: []byte("secret"), Salt: []byte("masterSalt")})
var upgradeErr *compatibility.InvalidUpgradeError
if tc.expectError {
assert.Error(t, err)
diff --git a/internal/constellation/helm/loader.go b/internal/constellation/helm/loader.go
index 6f6c95d4f..a3c6a50fa 100644
--- a/internal/constellation/helm/loader.go
+++ b/internal/constellation/helm/loader.go
@@ -21,7 +21,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
- "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/helm/imageversion"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
@@ -115,9 +114,17 @@ func newLoader(csp cloudprovider.Provider, attestationVariant variant.Variant, k
// that the new release is installed after the existing one to avoid name conflicts.
type releaseApplyOrder []release
+// OpenStackValues are helm values for OpenStack.
+type OpenStackValues struct {
+ DeployYawolLoadBalancer bool
+ FloatingIPPoolID string
+ YawolFlavorID string
+ YawolImageID string
+}
+
// loadReleases loads the embedded helm charts and returns them as a HelmReleases object.
func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWaitMode WaitMode, masterSecret uri.MasterSecret,
- serviceAccURI string, openStackCfg *config.OpenStackConfig,
+ serviceAccURI string, openStackValues *OpenStackValues,
) (releaseApplyOrder, error) {
ciliumRelease, err := i.loadRelease(ciliumInfo, helmWaitMode)
if err != nil {
@@ -143,7 +150,7 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa
}
svcVals, err := extraConstellationServicesValues(i.csp, i.attestationVariant, masterSecret,
- serviceAccURI, i.stateFile.Infrastructure, openStackCfg)
+ serviceAccURI, i.stateFile.Infrastructure, openStackValues)
if err != nil {
return nil, fmt.Errorf("extending constellation-services values: %w", err)
}
@@ -169,18 +176,23 @@ func (i *chartLoader) loadReleases(conformanceMode, deployCSIDriver bool, helmWa
}
releases = append(releases, awsRelease)
}
- if i.csp == cloudprovider.OpenStack && openStackCfg.DeployYawolLoadBalancer != nil && *openStackCfg.DeployYawolLoadBalancer {
- yawolRelease, err := i.loadRelease(yawolLBControllerInfo, WaitModeNone)
- if err != nil {
- return nil, fmt.Errorf("loading yawol chart: %w", err)
+ if i.csp == cloudprovider.OpenStack {
+ if openStackValues == nil {
+ return nil, errors.New("provider is OpenStack but OpenStack config is missing")
}
+ if openStackValues.DeployYawolLoadBalancer {
+ yawolRelease, err := i.loadRelease(yawolLBControllerInfo, WaitModeNone)
+ if err != nil {
+ return nil, fmt.Errorf("loading yawol chart: %w", err)
+ }
- yawolVals, err := extraYawolValues(serviceAccURI, i.stateFile.Infrastructure, openStackCfg)
- if err != nil {
- return nil, fmt.Errorf("extending yawol chart values: %w", err)
+ yawolVals, err := extraYawolValues(serviceAccURI, i.stateFile.Infrastructure, openStackValues)
+ if err != nil {
+ return nil, fmt.Errorf("extending yawol chart values: %w", err)
+ }
+ yawolRelease.values = mergeMaps(yawolRelease.values, yawolVals)
+ releases = append(releases, yawolRelease)
}
- yawolRelease.values = mergeMaps(yawolRelease.values, yawolVals)
- releases = append(releases, yawolRelease)
}
return releases, nil
@@ -347,7 +359,7 @@ func (i *chartLoader) cspTags() map[string]any {
func (i *chartLoader) loadCiliumValues(cloudprovider.Provider) (map[string]any, error) {
sharedConfig := map[string]any{
- "extraArgs": []string{"--node-encryption-opt-out-labels=invalid.label"},
+ "extraArgs": []string{"--node-encryption-opt-out-labels=invalid.label", "--bpf-filter-priority=128"},
"endpointRoutes": map[string]any{
"enabled": true,
},
@@ -400,6 +412,7 @@ func (i *chartLoader) loadCiliumValues(cloudprovider.Provider) (map[string]any,
"kubeProxyReplacement": "strict",
"enableCiliumEndpointSlice": true,
"kubeProxyReplacementHealthzBindAddr": "0.0.0.0:10256",
+ "cleanBpfState": true,
}
cspOverrideConfigs := map[string]map[string]any{
cloudprovider.AWS.String(): {},
diff --git a/internal/constellation/helm/loader_test.go b/internal/constellation/helm/loader_test.go
index 9f394e2ea..762f544b3 100644
--- a/internal/constellation/helm/loader_test.go
+++ b/internal/constellation/helm/loader_test.go
@@ -175,6 +175,19 @@ func TestConstellationServices(t *testing.T) {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
+ var openstackValues *OpenStackValues
+ if tc.config.Provider.OpenStack != nil {
+ var deploy bool
+ if tc.config.Provider.OpenStack.DeployYawolLoadBalancer != nil {
+ deploy = *tc.config.Provider.OpenStack.DeployYawolLoadBalancer
+ }
+ openstackValues = &OpenStackValues{
+ DeployYawolLoadBalancer: deploy,
+ FloatingIPPoolID: tc.config.Provider.OpenStack.FloatingIPPoolID,
+ YawolFlavorID: tc.config.Provider.OpenStack.YawolFlavorID,
+ YawolImageID: tc.config.Provider.OpenStack.YawolImageID,
+ }
+ }
chartLoader := chartLoader{
csp: tc.config.GetProvider(),
@@ -199,7 +212,7 @@ func TestConstellationServices(t *testing.T) {
UID: "uid",
Azure: &state.Azure{},
GCP: &state.GCP{},
- }, tc.config.Provider.OpenStack)
+ }, openstackValues)
require.NoError(err)
values = mergeMaps(values, extraVals)
diff --git a/internal/constellation/helm/overrides.go b/internal/constellation/helm/overrides.go
index 6dfea0c2c..2ef690935 100644
--- a/internal/constellation/helm/overrides.go
+++ b/internal/constellation/helm/overrides.go
@@ -18,7 +18,6 @@ import (
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/cloud/gcpshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
- "github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
@@ -34,14 +33,6 @@ import (
// Also, the charts are not rendered correctly without all of these values.
func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, output state.Infrastructure) map[string]any {
extraVals := map[string]any{}
- if conformanceMode {
- extraVals["kubeProxyReplacementHealthzBindAddr"] = ""
- extraVals["kubeProxyReplacement"] = "partial"
- extraVals["sessionAffinity"] = true
- extraVals["cni"] = map[string]any{
- "chainingMode": "portmap",
- }
- }
strictMode := map[string]any{}
// TODO(@3u13r): Once we are able to set the subnet of the load balancer VMs
@@ -76,6 +67,28 @@ func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, ou
},
}
+ // When --conformance is set, we try to mitigate https://github.com/cilium/cilium/issues/9207
+ // Users are discouraged of ever using this mode, except if they truly
+ // require protocol differentiation to work and cannot mitigate that any other way.
+ // Since there should always be workarounds, we only support this mode to
+ // pass the K8s conformance tests. It is not supported to switch to or from
+ // this mode after Constellation has been initialized.
+ // This only works for the K8s conformance tests up to K8s 1.28.
+ if conformanceMode {
+ extraVals["kubeProxyReplacementHealthzBindAddr"] = ""
+ extraVals["kubeProxyReplacement"] = "false"
+ extraVals["sessionAffinity"] = true
+ extraVals["cni"] = map[string]any{
+ "chainingMode": "portmap",
+ }
+ extraVals["ipMasqAgent"] = map[string]any{
+ "enabled": false,
+ }
+ extraVals["bpf"] = map[string]any{
+ "masquerade": false,
+ }
+ }
+
return extraVals
}
@@ -83,7 +96,7 @@ func extraCiliumValues(provider cloudprovider.Provider, conformanceMode bool, ou
// Values set inside this function are only applied during init, not during upgrade.
func extraConstellationServicesValues(
csp cloudprovider.Provider, attestationVariant variant.Variant, masterSecret uri.MasterSecret, serviceAccURI string,
- output state.Infrastructure, openStackCfg *config.OpenStackConfig,
+ output state.Infrastructure, openStackCfg *OpenStackValues,
) (map[string]any, error) {
extraVals := map[string]any{}
extraVals["join-service"] = map[string]any{
@@ -152,7 +165,7 @@ func extraConstellationServicesValues(
// extraYawolValues extends the given values map by some values depending on user input.
// Values set inside this function are only applied during init, not during upgrade.
-func extraYawolValues(serviceAccURI string, output state.Infrastructure, openStackCfg *config.OpenStackConfig) (map[string]any, error) {
+func extraYawolValues(serviceAccURI string, output state.Infrastructure, openStackCfg *OpenStackValues) (map[string]any, error) {
extraVals := map[string]any{}
creds, err := openstack.AccountKeyFromURI(serviceAccURI)
@@ -163,7 +176,7 @@ func extraYawolValues(serviceAccURI string, output state.Infrastructure, openSta
extraVals["yawol-config"] = map[string]any{
"secretData": yawolIni,
}
- if openStackCfg.DeployYawolLoadBalancer != nil && *openStackCfg.DeployYawolLoadBalancer {
+ if openStackCfg != nil && openStackCfg.DeployYawolLoadBalancer {
extraVals["yawol-controller"] = map[string]any{
"yawolOSSecretName": "yawolkey",
// has to be larger than ~30s to account for slow OpenStack API calls.
diff --git a/internal/constellation/helm/retryaction.go b/internal/constellation/helm/retryaction.go
index 89579356e..7117f0b45 100644
--- a/internal/constellation/helm/retryaction.go
+++ b/internal/constellation/helm/retryaction.go
@@ -49,7 +49,7 @@ func retryApply(ctx context.Context, action retrieableApplier, retryInterval tim
return fmt.Errorf("helm install: %w", err)
}
retryLoopFinishDuration := time.Since(retryLoopStartTime)
- log.Debug(fmt.Sprintf("Helm chart %q installation finished after %s", action.ReleaseName(), retryLoopFinishDuration))
+ log.Debug(fmt.Sprintf("Helm chart %q installation finished after %q", action.ReleaseName(), retryLoopFinishDuration))
return nil
}
@@ -61,9 +61,9 @@ type applyDoer struct {
// Do tries to apply the action.
func (i applyDoer) Do(ctx context.Context) error {
- i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %s", i.applier.ReleaseName()))
+ i.log.Debug(fmt.Sprintf("Trying to apply Helm chart %q", i.applier.ReleaseName()))
if err := i.applier.apply(ctx); err != nil {
- i.log.Debug(fmt.Sprintf("Helm chart installation %s failed: %v", i.applier.ReleaseName(), err))
+ i.log.Debug(fmt.Sprintf("Helm chart installation %q failed: %q", i.applier.ReleaseName(), err))
return err
}
diff --git a/internal/constellation/kubecmd/backup.go b/internal/constellation/kubecmd/backup.go
index c7e32d5be..e4ad27633 100644
--- a/internal/constellation/kubecmd/backup.go
+++ b/internal/constellation/kubecmd/backup.go
@@ -39,7 +39,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
for i := range crds {
path := filepath.Join(crdBackupFolder, crds[i].Name+".yaml")
- k.log.Debug(fmt.Sprintf("Creating CRD backup: %s", path))
+ k.log.Debug("Creating CRD backup", "path", path)
// We have to manually set kind/apiversion because of a long-standing limitation of the API:
// https://github.com/kubernetes/kubernetes/issues/3030#issuecomment-67543738
@@ -64,7 +64,7 @@ func (k *KubeCmd) BackupCRDs(ctx context.Context, fileHandler file.Handler, upgr
func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds []apiextensionsv1.CustomResourceDefinition, upgradeDir string) error {
k.log.Debug("Starting CR backup")
for _, crd := range crds {
- k.log.Debug(fmt.Sprintf("Creating backup for resource type: %s", crd.Name))
+ k.log.Debug("Creating backup", "crdName", crd.Name)
// Iterate over all versions of the CRD
// TODO(daniel-weisse): Consider iterating over crd.Status.StoredVersions instead
@@ -72,7 +72,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
// a version that is not installed in the cluster.
// With the StoredVersions field, we could only iterate over the installed versions.
for _, version := range crd.Spec.Versions {
- k.log.Debug(fmt.Sprintf("Creating backup of CRs for %q at version %q", crd.Name, version.Name))
+ k.log.Debug("Starting CustomResource backup", "crdName", crd.Name, "version", version.Name)
gvr := schema.GroupVersionResource{Group: crd.Spec.Group, Version: version.Name, Resource: crd.Spec.Names.Plural}
crs, err := k.kubectl.ListCRs(ctx, gvr)
@@ -80,7 +80,7 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
if !k8serrors.IsNotFound(err) {
return fmt.Errorf("retrieving CR %s: %w", crd.Name, err)
}
- k.log.Debug(fmt.Sprintf("No CRs found for %q at version %q, skipping...", crd.Name, version.Name))
+ k.log.Debug("No CustomResources found. Skipping...", "crdName", crd.Name, "version", version.Name)
continue
}
@@ -101,9 +101,9 @@ func (k *KubeCmd) BackupCRs(ctx context.Context, fileHandler file.Handler, crds
}
}
- k.log.Debug(fmt.Sprintf("Backup for resource type %q complete", crd.Name))
+ k.log.Debug("CustomResource backup complete", "crdName", crd.Name)
}
- k.log.Debug("CR backup complete")
+ k.log.Debug("All CustomResource backups completed")
return nil
}
diff --git a/internal/constellation/kubecmd/kubecmd.go b/internal/constellation/kubecmd/kubecmd.go
index dedb4539b..1ebf99265 100644
--- a/internal/constellation/kubecmd/kubecmd.go
+++ b/internal/constellation/kubecmd/kubecmd.go
@@ -47,10 +47,6 @@ import (
"sigs.k8s.io/yaml"
)
-const (
- maxRetryAttempts = 20
-)
-
// ErrInProgress signals that an upgrade is in progress inside the cluster.
var ErrInProgress = errors.New("upgrade in progress")
@@ -69,6 +65,7 @@ func (e *applyError) Error() string {
type KubeCmd struct {
kubectl kubectlInterface
retryInterval time.Duration
+ maxAttempts int
log debugLog
}
@@ -82,6 +79,7 @@ func New(kubeConfig []byte, log debugLog) (*KubeCmd, error) {
return &KubeCmd{
kubectl: client,
retryInterval: time.Second * 5,
+ maxAttempts: 20,
log: log,
}, nil
}
@@ -103,7 +101,7 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv
return fmt.Errorf("updating image version: %w", err)
}
- k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String()))
+ k.log.Debug("Updating local copy of nodeVersion image version", "oldVersion", nodeVersion.Spec.ImageVersion, "newVersion", imageVersion.String())
nodeVersion.Spec.ImageReference = imageReference
nodeVersion.Spec.ImageVersion = imageVersion.String()
@@ -121,41 +119,31 @@ func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersio
return err
}
- var upgradeErr *compatibility.InvalidUpgradeError
// We have to allow users to specify outdated k8s patch versions.
// Therefore, this code has to skip k8s updates if a user configures an outdated (i.e. invalid) k8s version.
- var components *corev1.ConfigMap
- _, err = versions.NewValidK8sVersion(string(kubernetesVersion), true)
- if err != nil {
- err = compatibility.NewInvalidUpgradeError(
+ if _, err := versions.NewValidK8sVersion(string(kubernetesVersion), true); err != nil {
+ return fmt.Errorf("skipping Kubernetes upgrade: %w", compatibility.NewInvalidUpgradeError(
nodeVersion.Spec.KubernetesClusterVersion,
string(kubernetesVersion),
- fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", ")),
+ fmt.Errorf("unsupported Kubernetes version, supported versions are %s", strings.Join(versions.SupportedK8sVersions(), ", "))),
)
- } else {
- versionConfig, ok := versions.VersionConfigs[kubernetesVersion]
- if !ok {
- err = compatibility.NewInvalidUpgradeError(
- nodeVersion.Spec.KubernetesClusterVersion,
- string(kubernetesVersion),
- fmt.Errorf("no version config matching K8s %s", kubernetesVersion),
- )
- } else {
- components, err = k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion,
- versionConfig.KubernetesComponents, force)
- }
}
- switch {
- case err == nil:
- err := k.applyComponentsCM(ctx, components)
- if err != nil {
- return fmt.Errorf("applying k8s components ConfigMap: %w", err)
- }
- case errors.As(err, &upgradeErr):
- return fmt.Errorf("skipping Kubernetes upgrade: %w", err)
- default:
- return fmt.Errorf("updating Kubernetes version: %w", err)
+ versionConfig, ok := versions.VersionConfigs[kubernetesVersion]
+ if !ok {
+ return fmt.Errorf("skipping Kubernetes upgrade: %w", compatibility.NewInvalidUpgradeError(
+ nodeVersion.Spec.KubernetesClusterVersion,
+ string(kubernetesVersion),
+ fmt.Errorf("no version config matching K8s %s", kubernetesVersion),
+ ))
+ }
+ components, err := k.prepareUpdateK8s(&nodeVersion, versionConfig.ClusterVersion, versionConfig.KubernetesComponents, force)
+ if err != nil {
+ return err
+ }
+
+ if err := k.applyComponentsCM(ctx, components); err != nil {
+ return fmt.Errorf("applying k8s components ConfigMap: %w", err)
}
updatedNodeVersion, err := k.applyNodeVersion(ctx, nodeVersion)
@@ -167,8 +155,13 @@ func (k *KubeCmd) UpgradeKubernetesVersion(ctx context.Context, kubernetesVersio
// ClusterStatus returns a map from node name to NodeStatus.
func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, error) {
- nodes, err := k.kubectl.GetNodes(ctx)
- if err != nil {
+ var nodes []corev1.Node
+
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
+ var err error
+ nodes, err = k.kubectl.GetNodes(ctx)
+ return err
+ }); err != nil {
return nil, fmt.Errorf("getting nodes: %w", err)
}
@@ -183,7 +176,7 @@ func (k *KubeCmd) ClusterStatus(ctx context.Context) (map[string]NodeStatus, err
// GetClusterAttestationConfig fetches the join-config configmap from the cluster,
// and returns the attestation config.
func (k *KubeCmd) GetClusterAttestationConfig(ctx context.Context, variant variant.Variant) (config.AttestationCfg, error) {
- existingConf, err := retryGetJoinConfig(ctx, k.kubectl, k.retryInterval, k.log)
+ existingConf, err := k.retryGetJoinConfig(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving current attestation config: %w", err)
}
@@ -208,19 +201,19 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At
return fmt.Errorf("marshaling attestation config: %w", err)
}
- joinConfig, err := retryGetJoinConfig(ctx, k.kubectl, k.retryInterval, k.log)
+ joinConfig, err := k.retryGetJoinConfig(ctx)
if err != nil {
if !k8serrors.IsNotFound(err) {
return fmt.Errorf("getting %s ConfigMap: %w", constants.JoinConfigMap, err)
}
- k.log.Debug(fmt.Sprintf("ConfigMap %q does not exist in namespace %q, creating it now", constants.JoinConfigMap, constants.ConstellationNamespace))
- if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error {
+ k.log.Debug("ConfigMap does not exist, creating it now", "name", constants.JoinConfigMap, "namespace", constants.ConstellationNamespace)
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
return k.kubectl.CreateConfigMap(ctx, joinConfigMap(newConfigJSON, measurementSalt))
- }, k.log); err != nil {
+ }); err != nil {
return fmt.Errorf("creating join-config ConfigMap: %w", err)
}
- k.log.Debug(fmt.Sprintf("Created %q ConfigMap in namespace %q", constants.JoinConfigMap, constants.ConstellationNamespace))
+ k.log.Debug("Created ConfigMap", "name", constants.JoinConfigMap, "namespace", constants.ConstellationNamespace)
return nil
}
@@ -228,10 +221,10 @@ func (k *KubeCmd) ApplyJoinConfig(ctx context.Context, newAttestConfig config.At
joinConfig.Data[constants.AttestationConfigFilename+"_backup"] = joinConfig.Data[constants.AttestationConfigFilename]
joinConfig.Data[constants.AttestationConfigFilename] = string(newConfigJSON)
k.log.Debug("Triggering attestation config update now")
- if err := retryAction(ctx, k.retryInterval, maxRetryAttempts, func(ctx context.Context) error {
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
_, err = k.kubectl.UpdateConfigMap(ctx, joinConfig)
return err
- }, k.log); err != nil {
+ }); err != nil {
return fmt.Errorf("setting new attestation config: %w", err)
}
@@ -266,7 +259,7 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa
k.log.Debug("No new SANs to add to the cluster's apiserver SAN field")
return nil
}
- k.log.Debug(fmt.Sprintf("Extending the cluster's apiserver SAN field with the following SANs: %s\n", strings.Join(missingSANs, ", ")))
+ k.log.Debug("Extending the cluster's apiserver SAN field", "certSANs", strings.Join(missingSANs, ", "))
clusterConfiguration.APIServer.CertSANs = append(clusterConfiguration.APIServer.CertSANs, missingSANs...)
sort.Strings(clusterConfiguration.APIServer.CertSANs)
@@ -278,7 +271,10 @@ func (k *KubeCmd) ExtendClusterConfigCertSANs(ctx context.Context, alternativeNa
kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
k.log.Debug("Triggering kubeadm config update now")
- if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
+ if err = k.retryAction(ctx, func(ctx context.Context) error {
+ _, err := k.kubectl.UpdateConfigMap(ctx, kubeadmConfig)
+ return err
+ }); err != nil {
return fmt.Errorf("setting new kubeadm config: %w", err)
}
@@ -299,14 +295,19 @@ func (k *KubeCmd) GetConstellationVersion(ctx context.Context) (NodeVersion, err
// getConstellationVersion returns the NodeVersion object of a Constellation cluster.
func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.NodeVersion, error) {
- raw, err := k.kubectl.GetCR(ctx, schema.GroupVersionResource{
- Group: "update.edgeless.systems",
- Version: "v1alpha1",
- Resource: "nodeversions",
- }, constants.NodeVersionResourceName)
- if err != nil {
+ var raw *unstructured.Unstructured
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
+ var err error
+ raw, err = k.kubectl.GetCR(ctx, schema.GroupVersionResource{
+ Group: "update.edgeless.systems",
+ Version: "v1alpha1",
+ Resource: "nodeversions",
+ }, constants.NodeVersionResourceName)
+ return err
+ }); err != nil {
return updatev1alpha1.NodeVersion{}, err
}
+
var nodeVersion updatev1alpha1.NodeVersion
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(raw.UnstructuredContent(), &nodeVersion); err != nil {
return updatev1alpha1.NodeVersion{}, fmt.Errorf("converting unstructured to NodeVersion: %w", err)
@@ -318,10 +319,15 @@ func (k *KubeCmd) getConstellationVersion(ctx context.Context) (updatev1alpha1.N
// getClusterConfiguration fetches the kubeadm-config configmap from the cluster, extracts the config
// and returns both the full configmap and the ClusterConfiguration.
func (k *KubeCmd) getClusterConfiguration(ctx context.Context) (kubeadmv1beta3.ClusterConfiguration, *corev1.ConfigMap, error) {
- existingConf, err := k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap)
- if err != nil {
+ var existingConf *corev1.ConfigMap
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
+ var err error
+ existingConf, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.KubeadmConfigMap)
+ return err
+ }); err != nil {
return kubeadmv1beta3.ClusterConfiguration{}, nil, fmt.Errorf("retrieving current kubeadm-config: %w", err)
}
+
clusterConf, ok := existingConf.Data[constants.ClusterConfigurationKey]
if !ok {
return kubeadmv1beta3.ClusterConfiguration{}, nil, errors.New("ClusterConfiguration missing from kubeadm-config")
@@ -337,9 +343,16 @@ func (k *KubeCmd) getClusterConfiguration(ctx context.Context) (kubeadmv1beta3.C
// applyComponentsCM applies the k8s components ConfigMap to the cluster.
func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.ConfigMap) error {
- // If the map already exists we can use that map and assume it has the same content as 'configMap'.
- if err := k.kubectl.CreateConfigMap(ctx, components); err != nil && !k8serrors.IsAlreadyExists(err) {
- return fmt.Errorf("creating k8s-components ConfigMap: %w. %T", err, err)
+ if err := k.retryAction(ctx, func(ctx context.Context) error {
+ // If the components ConfigMap already exists we assume it is up to date,
+ // since its name is derived from a hash of its contents.
+ err := k.kubectl.CreateConfigMap(ctx, components)
+ if err != nil && !k8serrors.IsAlreadyExists(err) {
+ return err
+ }
+ return nil
+ }); err != nil {
+ return fmt.Errorf("creating k8s-components ConfigMap: %w", err)
}
return nil
}
@@ -347,31 +360,35 @@ func (k *KubeCmd) applyComponentsCM(ctx context.Context, components *corev1.Conf
func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alpha1.NodeVersion) (updatev1alpha1.NodeVersion, error) {
k.log.Debug("Triggering NodeVersion upgrade now")
var updatedNodeVersion updatev1alpha1.NodeVersion
- err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
- newNode, err := k.getConstellationVersion(ctx)
- if err != nil {
- return fmt.Errorf("retrieving current NodeVersion: %w", err)
- }
- updateNodeVersions(nodeVersion, &newNode)
+ // Retry the whole RetryOnConflict block as well, so that non-conflict failures (e.g. etcd timeouts) are also retried.
+ err := k.retryAction(ctx, func(ctx context.Context) error {
+ return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+ newNode, err := k.getConstellationVersion(ctx)
+ if err != nil {
+ return fmt.Errorf("retrieving current NodeVersion: %w", err)
+ }
- raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode)
- if err != nil {
- return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
- }
- updated, err := k.kubectl.UpdateCR(ctx, schema.GroupVersionResource{
- Group: "update.edgeless.systems",
- Version: "v1alpha1",
- Resource: "nodeversions",
- }, &unstructured.Unstructured{Object: raw})
- if err != nil {
- return err
- }
+ updateNodeVersions(nodeVersion, &newNode)
- if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
- return fmt.Errorf("converting unstructured to NodeVersion: %w", err)
- }
- return nil
+ raw, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newNode)
+ if err != nil {
+ return fmt.Errorf("converting nodeVersion to unstructured: %w", err)
+ }
+ updated, err := k.kubectl.UpdateCR(ctx, schema.GroupVersionResource{
+ Group: "update.edgeless.systems",
+ Version: "v1alpha1",
+ Resource: "nodeversions",
+ }, &unstructured.Unstructured{Object: raw})
+ if err != nil {
+ return err
+ }
+
+ if err := runtime.DefaultUnstructuredConverter.FromUnstructured(updated.UnstructuredContent(), &updatedNodeVersion); err != nil {
+ return fmt.Errorf("converting unstructured to NodeVersion: %w", err)
+ }
+ return nil
+ })
})
return updatedNodeVersion, err
@@ -405,17 +422,52 @@ func (k *KubeCmd) prepareUpdateK8s(nodeVersion *updatev1alpha1.NodeVersion, newC
}
if !force {
if err := compatibility.IsValidUpgrade(nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion); err != nil {
- return nil, err
+ return nil, fmt.Errorf("skipping Kubernetes upgrade: %w", err)
}
}
- k.log.Debug(fmt.Sprintf("Updating local copy of nodeVersion Kubernetes version from %s to %s", nodeVersion.Spec.KubernetesClusterVersion, newClusterVersion))
+ k.log.Debug("Updating local copy of nodeVersion Kubernetes version", "oldVersion", nodeVersion.Spec.KubernetesClusterVersion, "newVersion", newClusterVersion)
nodeVersion.Spec.KubernetesComponentsReference = configMap.ObjectMeta.Name
nodeVersion.Spec.KubernetesClusterVersion = newClusterVersion
return &configMap, nil
}
+func (k *KubeCmd) retryGetJoinConfig(ctx context.Context) (*corev1.ConfigMap, error) {
+ var ctr int
+ retrieable := func(err error) bool {
+ if k8serrors.IsNotFound(err) {
+ return false
+ }
+ ctr++
+ k.log.Debug("Getting join-config ConfigMap failed", "attempt", ctr, "maxAttempts", k.maxAttempts, "error", err)
+ return ctr < k.maxAttempts
+ }
+
+ var joinConfig *corev1.ConfigMap
+ var err error
+ doer := &kubeDoer{
+ action: func(ctx context.Context) error {
+ joinConfig, err = k.kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap)
+ return err
+ },
+ }
+ retrier := conretry.NewIntervalRetrier(doer, k.retryInterval, retrieable)
+
+ err = retrier.Do(ctx)
+ return joinConfig, err
+}
+
+func (k *KubeCmd) retryAction(ctx context.Context, action func(ctx context.Context) error) error {
+ ctr := 0
+ retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, k.retryInterval, func(err error) bool {
+ ctr++
+ k.log.Debug("Action failed", "attempt", ctr, "maxAttempts", k.maxAttempts, "error", err)
+ return ctr < k.maxAttempts
+ })
+ return retrier.Do(ctx)
+}
+
func checkForApplyError(expected, actual updatev1alpha1.NodeVersion) error {
var err error
switch {
@@ -454,41 +506,6 @@ func (k *kubeDoer) Do(ctx context.Context) error {
return k.action(ctx)
}
-func retryGetJoinConfig(ctx context.Context, kubectl kubectlInterface, retryInterval time.Duration, log debugLog) (*corev1.ConfigMap, error) {
- var retries int
- retrieable := func(err error) bool {
- if k8serrors.IsNotFound(err) {
- return false
- }
- retries++
- log.Debug(fmt.Sprintf("Getting join-config ConfigMap failed (attempt %d/%d): %s", retries, maxRetryAttempts, err))
- return retries < maxRetryAttempts
- }
-
- var joinConfig *corev1.ConfigMap
- var err error
- doer := &kubeDoer{
- action: func(ctx context.Context) error {
- joinConfig, err = kubectl.GetConfigMap(ctx, constants.ConstellationNamespace, constants.JoinConfigMap)
- return err
- },
- }
- retrier := conretry.NewIntervalRetrier(doer, retryInterval, retrieable)
-
- err = retrier.Do(ctx)
- return joinConfig, err
-}
-
-func retryAction(ctx context.Context, retryInterval time.Duration, maxRetries int, action func(ctx context.Context) error, log debugLog) error {
- ctr := 0
- retrier := conretry.NewIntervalRetrier(&kubeDoer{action: action}, retryInterval, func(err error) bool {
- ctr++
- log.Debug(fmt.Sprintf("Action failed (attempt %d/%d): %s", ctr, maxRetries, err))
- return ctr < maxRetries
- })
- return retrier.Do(ctx)
-}
-
// kubectlInterface provides access to the Kubernetes API.
type kubectlInterface interface {
GetNodes(ctx context.Context) ([]corev1.Node, error)
diff --git a/internal/constellation/kubecmd/kubecmd_test.go b/internal/constellation/kubecmd/kubecmd_test.go
index cdaf99921..74e9562c1 100644
--- a/internal/constellation/kubecmd/kubecmd_test.go
+++ b/internal/constellation/kubecmd/kubecmd_test.go
@@ -174,8 +174,10 @@ func TestUpgradeNodeImage(t *testing.T) {
}
upgrader := KubeCmd{
- kubectl: kubectl,
- log: logger.NewTest(t),
+ kubectl: kubectl,
+ retryInterval: time.Millisecond,
+ maxAttempts: 5,
+ log: logger.NewTest(t),
}
err = upgrader.UpgradeNodeImage(context.Background(), tc.newImageVersion, fmt.Sprintf("/path/to/image:%s", tc.newImageVersion.String()), tc.force)
@@ -285,8 +287,10 @@ func TestUpgradeKubernetesVersion(t *testing.T) {
}
upgrader := KubeCmd{
- kubectl: kubectl,
- log: logger.NewTest(t),
+ kubectl: kubectl,
+ retryInterval: time.Millisecond,
+ maxAttempts: 5,
+ log: logger.NewTest(t),
}
err = upgrader.UpgradeKubernetesVersion(context.Background(), tc.newKubernetesVersion, tc.force)
@@ -341,7 +345,9 @@ func TestIsValidImageUpgrade(t *testing.T) {
assert := assert.New(t)
upgrader := &KubeCmd{
- log: logger.NewTest(t),
+ retryInterval: time.Millisecond,
+ maxAttempts: 5,
+ log: logger.NewTest(t),
}
nodeVersion := updatev1alpha1.NodeVersion{
@@ -392,7 +398,9 @@ func TestUpdateK8s(t *testing.T) {
assert := assert.New(t)
upgrader := &KubeCmd{
- log: logger.NewTest(t),
+ retryInterval: time.Millisecond,
+ maxAttempts: 5,
+ log: logger.NewTest(t),
}
nodeVersion := updatev1alpha1.NodeVersion{
@@ -589,6 +597,7 @@ func TestApplyJoinConfig(t *testing.T) {
kubectl: tc.kubectl,
log: logger.NewTest(t),
retryInterval: time.Millisecond,
+ maxAttempts: 5,
}
err := cmd.ApplyJoinConfig(context.Background(), tc.newAttestationCfg, []byte{0x11})
@@ -611,6 +620,62 @@ func TestApplyJoinConfig(t *testing.T) {
}
}
+func TestRetryAction(t *testing.T) {
+ maxAttempts := 3
+
+ testCases := map[string]struct {
+ failures int
+ wantErr bool
+ }{
+ "no failures": {
+ failures: 0,
+ },
+ "fail once": {
+ failures: 1,
+ },
+ "fail equal to maxAttempts": {
+ failures: maxAttempts,
+ wantErr: true,
+ },
+ "fail more than maxAttempts": {
+ failures: maxAttempts + 5,
+ wantErr: true,
+ },
+ }
+
+ for name, tc := range testCases {
+ t.Run(name, func(t *testing.T) {
+ k := &KubeCmd{
+ retryInterval: time.Millisecond,
+ maxAttempts: maxAttempts,
+ log: logger.NewTest(t),
+ }
+
+ errs := map[int]error{}
+ for idx := range tc.failures {
+ errs[idx] = assert.AnError
+ }
+
+ assert := assert.New(t)
+
+ failureCtr := 0
+ action := func(context.Context) error {
+ defer func() { failureCtr++ }()
+ return errs[failureCtr]
+ }
+
+ err := k.retryAction(context.Background(), action)
+ if tc.wantErr {
+ assert.Error(err)
+ assert.Equal(min(tc.failures, maxAttempts), failureCtr)
+ return
+ }
+ assert.NoError(err)
+ assert.Equal(tc.failures, failureCtr-1)
+ })
+ }
+}
+
type fakeUnstructuredClient struct {
mock.Mock
}
diff --git a/internal/grpc/grpclog/grpclog.go b/internal/grpc/grpclog/grpclog.go
index e29c990b6..be4d27ff3 100644
--- a/internal/grpc/grpclog/grpclog.go
+++ b/internal/grpc/grpclog/grpclog.go
@@ -31,15 +31,15 @@ func LogStateChangesUntilReady(ctx context.Context, conn getStater, log debugLog
go func() {
defer wg.Done()
state := conn.GetState()
- log.Debug(fmt.Sprintf("Connection state started as %s", state))
+ log.Debug(fmt.Sprintf("Connection state started as %q", state))
for ; state != connectivity.Ready && conn.WaitForStateChange(ctx, state); state = conn.GetState() {
- log.Debug(fmt.Sprintf("Connection state changed to %s", state))
+ log.Debug(fmt.Sprintf("Connection state changed to %q", state))
}
if state == connectivity.Ready {
log.Debug("Connection ready")
isReadyCallback()
} else {
- log.Debug(fmt.Sprintf("Connection state ended with %s", state))
+ log.Debug(fmt.Sprintf("Connection state ended with %q", state))
}
}()
}
diff --git a/internal/grpc/grpclog/grpclog_test.go b/internal/grpc/grpclog/grpclog_test.go
index 704f1a923..eb912521f 100644
--- a/internal/grpc/grpclog/grpclog_test.go
+++ b/internal/grpc/grpclog/grpclog_test.go
@@ -33,8 +33,8 @@ func TestLogStateChanges(t *testing.T) {
},
assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) {
require.Len(t, lg.msgs, 3)
- assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0])
- assert.Equal(t, "Connection state changed to CONNECTING", lg.msgs[1])
+ assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0])
+ assert.Equal(t, "Connection state changed to \"CONNECTING\"", lg.msgs[1])
assert.Equal(t, "Connection ready", lg.msgs[2])
assert.True(t, isReadyCallbackCalled)
},
@@ -49,7 +49,7 @@ func TestLogStateChanges(t *testing.T) {
},
assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) {
require.Len(t, lg.msgs, 2)
- assert.Equal(t, "Connection state started as READY", lg.msgs[0])
+ assert.Equal(t, "Connection state started as \"READY\"", lg.msgs[0])
assert.Equal(t, "Connection ready", lg.msgs[1])
assert.True(t, isReadyCallbackCalled)
},
@@ -64,8 +64,8 @@ func TestLogStateChanges(t *testing.T) {
},
assert: func(t *testing.T, lg *spyLog, isReadyCallbackCalled bool) {
require.Len(t, lg.msgs, 2)
- assert.Equal(t, "Connection state started as CONNECTING", lg.msgs[0])
- assert.Equal(t, "Connection state ended with CONNECTING", lg.msgs[1])
+ assert.Equal(t, "Connection state started as \"CONNECTING\"", lg.msgs[0])
+ assert.Equal(t, "Connection state ended with \"CONNECTING\"", lg.msgs[1])
assert.False(t, isReadyCallbackCalled)
},
},
diff --git a/internal/grpc/retry/retry.go b/internal/grpc/retry/retry.go
index 9d03279a4..b7457fc1f 100644
--- a/internal/grpc/retry/retry.go
+++ b/internal/grpc/retry/retry.go
@@ -16,9 +16,10 @@ import (
)
const (
- authEOFErr = `connection error: desc = "transport: authentication handshake failed: EOF"`
- authReadTCPErr = `connection error: desc = "transport: authentication handshake failed: read tcp`
- authHandshakeErr = `connection error: desc = "transport: authentication handshake failed`
+ authEOFErr = `connection error: desc = "transport: authentication handshake failed: EOF"`
+ authReadTCPErr = `connection error: desc = "transport: authentication handshake failed: read tcp`
+ authHandshakeErr = `connection error: desc = "transport: authentication handshake failed`
+ authHandshakeDeadlineExceededErr = `connection error: desc = "transport: authentication handshake failed: context deadline exceeded`
)
// grpcErr is the error type that is returned by the grpc client.
@@ -57,6 +58,11 @@ func ServiceIsUnavailable(err error) bool {
return true
}
+ // retry if the handshake deadline was exceeded
+ if strings.HasPrefix(statusErr.Message(), authHandshakeDeadlineExceededErr) {
+ return true
+ }
+
return !strings.HasPrefix(statusErr.Message(), authHandshakeErr)
}
@@ -76,6 +82,11 @@ func LoadbalancerIsNotReady(err error) bool {
return false
}
+ // retry if the handshake deadline was exceeded
+ if strings.HasPrefix(statusErr.Message(), authHandshakeDeadlineExceededErr) {
+ return true
+ }
+
// retry if GCP proxy LB isn't fully available yet
return strings.HasPrefix(statusErr.Message(), authReadTCPErr)
}
diff --git a/internal/grpc/retry/retry_test.go b/internal/grpc/retry/retry_test.go
index a1b44dce4..5e51e4bb0 100644
--- a/internal/grpc/retry/retry_test.go
+++ b/internal/grpc/retry/retry_test.go
@@ -43,6 +43,10 @@ func TestServiceIsUnavailable(t *testing.T) {
err: status.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed: read tcp error"`),
wantUnavailable: true,
},
+ "handshake deadline exceeded error": {
+ err: status.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed: context deadline exceeded"`),
+ wantUnavailable: true,
+ },
"wrapped error": {
err: fmt.Errorf("some wrapping: %w", status.Error(codes.Unavailable, "error")),
wantUnavailable: true,
@@ -82,6 +86,10 @@ func TestLoadbalancerIsNotReady(t *testing.T) {
err: status.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed: read tcp error"`),
wantNotReady: true,
},
+ "handshake deadline exceeded error": {
+ err: status.Error(codes.Unavailable, `connection error: desc = "transport: authentication handshake failed: context deadline exceeded"`),
+ wantNotReady: true,
+ },
"normal unavailable error": {
err: status.Error(codes.Unavailable, "error"),
},
diff --git a/internal/imagefetcher/imagefetcher.go b/internal/imagefetcher/imagefetcher.go
index 643e7c1b4..ebbf74e41 100644
--- a/internal/imagefetcher/imagefetcher.go
+++ b/internal/imagefetcher/imagefetcher.go
@@ -111,7 +111,7 @@ func buildMarketplaceImage(payload marketplaceImagePayload) (string, error) {
return "", fmt.Errorf("parsing image version: %w", err)
}
- if sv.Prerelease() != "" {
+ if sv.Prerelease() != "" && payload.provider != cloudprovider.OpenStack {
return "", fmt.Errorf("marketplace images are not supported for prerelease versions")
}
@@ -131,6 +131,9 @@ func buildMarketplaceImage(payload marketplaceImagePayload) (string, error) {
case cloudprovider.AWS:
// For AWS, we use the AMI alias, which just needs the version and infers the rest transparently.
return fmt.Sprintf("resolve:ssm:/aws/service/marketplace/prod-77ylkenlkgufs/%s", payload.imgInfo.Version), nil
+ case cloudprovider.OpenStack:
+ // For OpenStack / STACKIT, we use the image reference directly.
+ return getReferenceFromImageInfo(payload.provider, payload.attestationVariant.String(), payload.imgInfo, payload.filters...)
default:
return "", fmt.Errorf("marketplace images are not supported for csp %s", payload.provider.String())
}
diff --git a/internal/license/license.go b/internal/license/license.go
index 0bf1cb3fe..0010bd2d0 100644
--- a/internal/license/license.go
+++ b/internal/license/license.go
@@ -13,6 +13,8 @@ type Action string
const (
// CommunityLicense is used by everyone who has not bought an enterprise license.
CommunityLicense = "00000000-0000-0000-0000-000000000000"
+ // MarketplaceLicense is used by everyone who uses a marketplace image.
+ MarketplaceLicense = "11111111-1111-1111-1111-111111111111"
// Init action denotes the initialization of a Constellation cluster.
Init Action = "init"
diff --git a/internal/osimage/archive/archive.go b/internal/osimage/archive/archive.go
index f42b48e4c..f49cf0de8 100644
--- a/internal/osimage/archive/archive.go
+++ b/internal/osimage/archive/archive.go
@@ -74,7 +74,7 @@ func (a *Archivist) Archive(ctx context.Context, version versionsapi.Version, cs
if err != nil {
return "", err
}
- a.log.Debug(fmt.Sprintf("Archiving OS image %s %s %v to s3://%v/%v", csp, attestationVariant, version.ShortPath(), a.bucket, key))
+ a.log.Debug(fmt.Sprintf("Archiving OS image %q to s3://%s/%s", fmt.Sprintf("%s %s %v", csp, attestationVariant, version.ShortPath()), a.bucket, key))
_, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &a.bucket,
Key: &key,
diff --git a/internal/osimage/imageinfo/imageinfo.go b/internal/osimage/imageinfo/imageinfo.go
index a26ab24a5..844690bd9 100644
--- a/internal/osimage/imageinfo/imageinfo.go
+++ b/internal/osimage/imageinfo/imageinfo.go
@@ -78,7 +78,7 @@ func (a *Uploader) Upload(ctx context.Context, imageInfo versionsapi.ImageInfo)
if err != nil {
return "", err
}
- a.log.Debug(fmt.Sprintf("Archiving image info to s3://%v/%v", a.bucket, key))
+ a.log.Debug(fmt.Sprintf("Archiving image info to s3://%s/%s", a.bucket, key))
buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(imageInfo); err != nil {
return "", err
diff --git a/internal/osimage/measurementsuploader/measurementsuploader.go b/internal/osimage/measurementsuploader/measurementsuploader.go
index 1e6c9ffa0..59c2eecfe 100644
--- a/internal/osimage/measurementsuploader/measurementsuploader.go
+++ b/internal/osimage/measurementsuploader/measurementsuploader.go
@@ -92,7 +92,7 @@ func (a *Uploader) Upload(ctx context.Context, rawMeasurement, signature io.Read
if err != nil {
return "", "", err
}
- a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%v/%v and s3://%v/%v", a.bucket, key, a.bucket, sigKey))
+ a.log.Debug(fmt.Sprintf("Archiving image measurements to s3://%s/%s and s3://%s/%s", a.bucket, key, a.bucket, sigKey))
if _, err = a.uploadClient.Upload(ctx, &s3.PutObjectInput{
Bucket: &a.bucket,
Key: &key,
diff --git a/internal/osimage/nop/nop.go b/internal/osimage/nop/nop.go
index 5618acf03..883a7bf3c 100644
--- a/internal/osimage/nop/nop.go
+++ b/internal/osimage/nop/nop.go
@@ -28,6 +28,6 @@ func New(log *slog.Logger) *Uploader {
// Upload pretends to upload images to a csp.
func (u *Uploader) Upload(_ context.Context, req *osimage.UploadRequest) ([]versionsapi.ImageInfoEntry, error) {
- u.log.Debug(fmt.Sprintf("Skipping image upload of %s since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath()))
+ u.log.Debug(fmt.Sprintf("Skipping image upload of %q since this CSP does not require images to be uploaded in advance.", req.Version.ShortPath()))
return nil, nil
}
diff --git a/internal/staticupload/staticupload.go b/internal/staticupload/staticupload.go
index fd09734ad..5b68e8ae0 100644
--- a/internal/staticupload/staticupload.go
+++ b/internal/staticupload/staticupload.go
@@ -134,7 +134,7 @@ func (c *Client) Flush(ctx context.Context) error {
c.mux.Lock()
defer c.mux.Unlock()
- c.logger.Debug(fmt.Sprintf("Invalidating keys: %s", c.dirtyKeys))
+ c.logger.Debug(fmt.Sprintf("Invalidating keys: %q", c.dirtyKeys))
if len(c.dirtyKeys) == 0 {
return nil
}
@@ -219,7 +219,7 @@ func (c *Client) waitForInvalidations(ctx context.Context) error {
}
waiter := cloudfront.NewInvalidationCompletedWaiter(c.cdnClient)
- c.logger.Debug(fmt.Sprintf("Waiting for invalidations %s in distribution %s", c.invalidationIDs, c.distributionID))
+ c.logger.Debug(fmt.Sprintf("Waiting for invalidations %v in distribution %q", c.invalidationIDs, c.distributionID))
for _, invalidationID := range c.invalidationIDs {
waitIn := &cloudfront.GetInvalidationInput{
DistributionId: &c.distributionID,
diff --git a/internal/verify/verify.go b/internal/verify/verify.go
index 60b2e726e..8bd0eb25d 100644
--- a/internal/verify/verify.go
+++ b/internal/verify/verify.go
@@ -216,7 +216,7 @@ type Certificate struct {
func newCertificates(certTypeName string, cert []byte, log debugLog) (certs []Certificate, err error) {
newlinesTrimmed := strings.TrimSpace(string(cert))
- log.Debug(fmt.Sprintf("Decoding PEM certificate: %s", certTypeName))
+ log.Debug(fmt.Sprintf("Decoding PEM certificate: %q", certTypeName))
i := 1
var rest []byte
var block *pem.Block
diff --git a/internal/versions/components/components.pb.go b/internal/versions/components/components.pb.go
index 8293a1675..76fe28755 100644
--- a/internal/versions/components/components.pb.go
+++ b/internal/versions/components/components.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: internal/versions/components/components.proto
diff --git a/joinservice/internal/certcache/amdkds/amdkds_test.go b/joinservice/internal/certcache/amdkds/amdkds_test.go
index 1ce3706a9..d7ede82f9 100644
--- a/joinservice/internal/certcache/amdkds/amdkds_test.go
+++ b/joinservice/internal/certcache/amdkds/amdkds_test.go
@@ -71,6 +71,6 @@ type stubGetter struct {
}
func (s *stubGetter) Get(url string) ([]byte, error) {
- s.log.Debug(fmt.Sprintf("Request to %s", url))
+ s.log.Debug(fmt.Sprintf("Request to %q", url))
return s.ret, s.err
}
diff --git a/joinservice/internal/certcache/certcache.go b/joinservice/internal/certcache/certcache.go
index cada6fd7c..ba88b67b4 100644
--- a/joinservice/internal/certcache/certcache.go
+++ b/joinservice/internal/certcache/certcache.go
@@ -53,11 +53,11 @@ func (c *Client) CreateCertChainCache(ctx context.Context) (*CachedCerts, error)
case variant.AWSSEVSNP{}:
reportSigner = abi.VlekReportSigner
default:
- c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %s", c.attVariant))
+ c.log.Debug(fmt.Sprintf("No certificate chain caching possible for attestation variant %q", c.attVariant))
return nil, nil
}
- c.log.Debug(fmt.Sprintf("Creating %s certificate chain cache", c.attVariant))
+ c.log.Debug(fmt.Sprintf("Creating %q certificate chain cache", c.attVariant))
ask, ark, err := c.createCertChainCache(ctx, reportSigner)
if err != nil {
return nil, fmt.Errorf("creating %s certificate chain cache: %w", c.attVariant, err)
diff --git a/joinservice/internal/watcher/validator.go b/joinservice/internal/watcher/validator.go
index 6bf43635e..2d6a3bd3b 100644
--- a/joinservice/internal/watcher/validator.go
+++ b/joinservice/internal/watcher/validator.go
@@ -79,7 +79,7 @@ func (u *Updatable) Update() error {
if err != nil {
return fmt.Errorf("unmarshaling config: %w", err)
}
- u.log.Debug(fmt.Sprintf("New expected measurements: %+v", cfg.GetMeasurements()))
+ u.log.Debug(fmt.Sprintf("New expected measurements: %s", cfg.GetMeasurements().String()))
cfgWithCerts, err := u.configWithCerts(cfg)
if err != nil {
diff --git a/joinservice/joinproto/join.pb.go b/joinservice/joinproto/join.pb.go
index 8afb7df19..5fe259256 100644
--- a/joinservice/joinproto/join.pb.go
+++ b/joinservice/joinproto/join.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: joinservice/joinproto/join.proto
diff --git a/keyservice/keyserviceproto/keyservice.pb.go b/keyservice/keyserviceproto/keyservice.pb.go
index 59faea21d..65beb0c55 100644
--- a/keyservice/keyserviceproto/keyservice.pb.go
+++ b/keyservice/keyserviceproto/keyservice.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: keyservice/keyserviceproto/keyservice.proto
diff --git a/s3proxy/deploy/s3proxy/Chart.yaml b/s3proxy/deploy/s3proxy/Chart.yaml
index f07afba51..514f700ff 100644
--- a/s3proxy/deploy/s3proxy/Chart.yaml
+++ b/s3proxy/deploy/s3proxy/Chart.yaml
@@ -2,4 +2,4 @@ apiVersion: v2
name: s3proxy
description: Helm chart to deploy s3proxy.
type: application
-version: 0.0.0
+version: 2.16.4
diff --git a/terraform-provider-constellation/BUILD.bazel b/terraform-provider-constellation/BUILD.bazel
index da0fd50ba..347af014b 100644
--- a/terraform-provider-constellation/BUILD.bazel
+++ b/terraform-provider-constellation/BUILD.bazel
@@ -6,6 +6,7 @@ go_binary(
name = "tf_provider",
out = "terraform-provider-constellation", # for complying with Terraform provider naming convention
embed = [":terraform-provider-constellation_lib"],
+ gotags = ["enterprise"],
pure = "on",
visibility = ["//visibility:public"],
)
diff --git a/terraform-provider-constellation/docs/data-sources/attestation.md b/terraform-provider-constellation/docs/data-sources/attestation.md
index bd578314c..ec4118c0f 100644
--- a/terraform-provider-constellation/docs/data-sources/attestation.md
+++ b/terraform-provider-constellation/docs/data-sources/attestation.md
@@ -33,6 +33,7 @@ data "constellation_attestation" "test" {
* `azure-sev-snp`
* `azure-tdx`
* `gcp-sev-es`
+ * `qemu-vtpm`
- `csp` (String) CSP (Cloud Service Provider) to use. (e.g. `azure`)
See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview/clouds) that Constellation supports.
- `image` (Attributes) Constellation OS Image to use on the nodes. (see [below for nested schema](#nestedatt--image))
@@ -58,6 +59,10 @@ Required:
- `$SEMANTIC_VERSION` is the semantic version of the image, e.g. `vX.Y.Z` or `vX.Y.Z-pre...`.
- `version` (String) Semantic version of the image.
+Optional:
+
+- `marketplace_image` (Boolean) Whether a marketplace image should be used.
+
### Nested Schema for `attestation`
@@ -78,6 +83,7 @@ Read-Only:
* `azure-sev-snp`
* `azure-tdx`
* `gcp-sev-es`
+ * `qemu-vtpm`
### Nested Schema for `attestation.azure_firmware_signer_config`
diff --git a/terraform-provider-constellation/docs/data-sources/image.md b/terraform-provider-constellation/docs/data-sources/image.md
index 8eb48929e..7f7186b56 100644
--- a/terraform-provider-constellation/docs/data-sources/image.md
+++ b/terraform-provider-constellation/docs/data-sources/image.md
@@ -32,6 +32,7 @@ data "constellation_image" "example" {
* `azure-sev-snp`
* `azure-tdx`
* `gcp-sev-es`
+ * `qemu-vtpm`
- `csp` (String) CSP (Cloud Service Provider) to use. (e.g. `azure`)
See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview/clouds) that Constellation supports.
@@ -49,6 +50,10 @@ The Constellation OS image must be [replicated to the region](https://docs.edgel
### Nested Schema for `image`
+Optional:
+
+- `marketplace_image` (Boolean) Whether a marketplace image should be used.
+
Read-Only:
- `reference` (String) CSP-specific unique reference to the image. The format differs per CSP.
diff --git a/terraform-provider-constellation/docs/resources/cluster.md b/terraform-provider-constellation/docs/resources/cluster.md
index d5deed553..7b6d1ca21 100644
--- a/terraform-provider-constellation/docs/resources/cluster.md
+++ b/terraform-provider-constellation/docs/resources/cluster.md
@@ -86,6 +86,7 @@ See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview
- `gcp` (Attributes) GCP-specific configuration. (see [below for nested schema](#nestedatt--gcp))
- `in_cluster_endpoint` (String) The endpoint of the cluster. When not set, the out-of-cluster endpoint is used.
- `license_id` (String) Constellation license ID. When not set, the community license is used.
+- `openstack` (Attributes) OpenStack-specific configuration. (see [below for nested schema](#nestedatt--openstack))
### Read-Only
@@ -110,6 +111,7 @@ Required:
* `azure-sev-snp`
* `azure-tdx`
* `gcp-sev-es`
+ * `qemu-vtpm`
Optional:
@@ -162,6 +164,10 @@ Required:
- `$SEMANTIC_VERSION` is the semantic version of the image, e.g. `vX.Y.Z` or `vX.Y.Z-pre...`.
- `version` (String) Semantic version of the image.
+Optional:
+
+- `marketplace_image` (Boolean) Whether a marketplace image should be used.
+
### Nested Schema for `network_config`
@@ -207,6 +213,24 @@ Required:
- `project_id` (String) ID of the GCP project the cluster resides in.
- `service_account_key` (String) Base64-encoded private key JSON object of the service account used within the cluster.
+
+
+### Nested Schema for `openstack`
+
+Required:
+
+- `cloud` (String) Name of the cloud in the clouds.yaml file.
+- `floating_ip_pool_id` (String) Floating IP pool to use for the VMs.
+- `network_id` (String) OpenStack network ID to use for the VMs.
+- `subnet_id` (String) OpenStack subnet ID to use for the VMs.
+
+Optional:
+
+- `clouds_yaml_path` (String) Path to the clouds.yaml file.
+- `deploy_yawol_load_balancer` (Boolean) Whether to deploy a YAWOL load balancer.
+- `yawol_flavor_id` (String) OpenStack flavor used by the yawollet.
+- `yawol_image_id` (String) OpenStack OS image used by the yawollet.
+
## Import
Import is supported using the following syntax:
diff --git a/terraform-provider-constellation/examples/full/azure/main.tf b/terraform-provider-constellation/examples/full/azure/main.tf
index 629f07e2d..46a5f8f9b 100644
--- a/terraform-provider-constellation/examples/full/azure/main.tf
+++ b/terraform-provider-constellation/examples/full/azure/main.tf
@@ -44,8 +44,8 @@ module "azure_iam" {
// replace $VERSION with the Constellation version you want to use, e.g., v2.14.0
source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/iam/azure"
location = local.location
- service_principal_name = "${local.name}-test-sp"
- resource_group_name = "${local.name}-test-rg"
+ service_principal_name = "${local.name}-sp"
+ resource_group_name = "${local.name}-rg"
}
module "azure_infrastructure" {
diff --git a/terraform-provider-constellation/examples/full/gcp/main.tf b/terraform-provider-constellation/examples/full/gcp/main.tf
index 1db0f63fb..f7ac80b04 100644
--- a/terraform-provider-constellation/examples/full/gcp/main.tf
+++ b/terraform-provider-constellation/examples/full/gcp/main.tf
@@ -46,7 +46,7 @@ module "gcp_iam" {
// replace $VERSION with the Constellation version you want to use, e.g., v2.14.0
source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/iam/gcp"
project_id = local.project_id
- service_account_id = "${local.name}-test-sa"
+ service_account_id = "${local.name}-sa"
zone = local.zone
region = local.region
}
diff --git a/terraform-provider-constellation/examples/full/stackit/main.tf b/terraform-provider-constellation/examples/full/stackit/main.tf
new file mode 100644
index 000000000..22ef92451
--- /dev/null
+++ b/terraform-provider-constellation/examples/full/stackit/main.tf
@@ -0,0 +1,128 @@
+terraform {
+ required_providers {
+ constellation = {
+ source = "edgelesssys/constellation"
+ version = "0.0.0" // replace with the version you want to use
+ }
+ random = {
+ source = "hashicorp/random"
+ version = "3.6.0"
+ }
+ }
+}
+
+locals {
+ name = "constell"
+ image_version = "vX.Y.Z"
+ kubernetes_version = "vX.Y.Z"
+ microservice_version = "vX.Y.Z"
+ csp = "stackit"
+ attestation_variant = "qemu-vtpm"
+ zone = "eu01-1"
+ cloud = "stackit"
+ clouds_yaml_path = "~/.config/openstack/clouds.yaml"
+ floating_ip_pool_id = "970ace5c-458f-484a-a660-0903bcfd91ad"
+ stackit_project_id = "" // replace with the STACKIT project id
+ control_plane_count = 3
+ worker_count = 2
+ instance_type = "m1a.8cd"
+ deploy_yawol_load_balancer = true
+ yawol_image_id = "bcd6c13e-75d1-4c3f-bf0f-8f83580cc1be"
+ yawol_flavor_id = "3b11b27e-6c73-470d-b595-1d85b95a8cdf"
+
+ master_secret = random_bytes.master_secret.hex
+ master_secret_salt = random_bytes.master_secret_salt.hex
+ measurement_salt = random_bytes.measurement_salt.hex
+}
+
+resource "random_bytes" "master_secret" {
+ length = 32
+}
+
+resource "random_bytes" "master_secret_salt" {
+ length = 32
+}
+
+resource "random_bytes" "measurement_salt" {
+ length = 32
+}
+
+module "stackit_infrastructure" {
+ // replace $VERSION with the Constellation version you want to use, e.g., v2.14.0
+ source = "https://github.com/edgelesssys/constellation/releases/download/$VERSION/terraform-module.zip//terraform-module/openstack"
+ name = local.name
+ node_groups = {
+ control_plane_default = {
+ role = "control-plane"
+ flavor_id = local.instance_type
+ state_disk_size = 30
+ state_disk_type = "storage_premium_perf6"
+ initial_count = local.control_plane_count
+ zone = local.zone
+ },
+ worker_default = {
+ role = "worker"
+ flavor_id = local.instance_type
+ state_disk_size = 30
+ state_disk_type = "storage_premium_perf6"
+ initial_count = local.worker_count
+ zone = local.zone
+ }
+ }
+ image_id = data.constellation_image.bar.image.reference
+ debug = false
+ cloud = local.cloud
+ openstack_clouds_yaml_path = local.clouds_yaml_path
+ floating_ip_pool_id = local.floating_ip_pool_id
+ stackit_project_id = local.stackit_project_id
+}
+
+data "constellation_attestation" "foo" {
+ csp = local.csp
+ attestation_variant = local.attestation_variant
+ image = data.constellation_image.bar.image
+}
+
+data "constellation_image" "bar" {
+ csp = local.csp
+ attestation_variant = local.attestation_variant
+ version = local.image_version
+ marketplace_image = true
+}
+
+resource "constellation_cluster" "stackit_example" {
+ csp = local.csp
+ name = module.stackit_infrastructure.name
+ uid = module.stackit_infrastructure.uid
+ image = data.constellation_image.bar.image
+ attestation = data.constellation_attestation.foo.attestation
+ kubernetes_version = local.kubernetes_version
+ constellation_microservice_version = local.microservice_version
+ init_secret = module.stackit_infrastructure.init_secret
+ master_secret = local.master_secret
+ master_secret_salt = local.master_secret_salt
+ measurement_salt = local.measurement_salt
+ out_of_cluster_endpoint = module.stackit_infrastructure.out_of_cluster_endpoint
+ in_cluster_endpoint = module.stackit_infrastructure.in_cluster_endpoint
+ api_server_cert_sans = module.stackit_infrastructure.api_server_cert_sans
+ openstack = {
+ cloud = local.cloud
+ clouds_yaml_path = local.clouds_yaml_path
+ floating_ip_pool_id = local.floating_ip_pool_id
+ deploy_yawol_load_balancer = local.deploy_yawol_load_balancer
+ yawol_image_id = local.yawol_image_id
+ yawol_flavor_id = local.yawol_flavor_id
+ network_id = module.stackit_infrastructure.network_id
+ subnet_id = module.stackit_infrastructure.lb_subnetwork_id
+ }
+ network_config = {
+ ip_cidr_node = module.stackit_infrastructure.ip_cidr_node
+ ip_cidr_service = "10.96.0.0/12"
+ }
+}
+
+output "kubeconfig" {
+ value = constellation_cluster.stackit_example.kubeconfig
+ sensitive = true
+ description = "KubeConfig for the Constellation cluster."
+}
diff --git a/terraform-provider-constellation/internal/provider/BUILD.bazel b/terraform-provider-constellation/internal/provider/BUILD.bazel
index 23cd58f8f..1fac7618a 100644
--- a/terraform-provider-constellation/internal/provider/BUILD.bazel
+++ b/terraform-provider-constellation/internal/provider/BUILD.bazel
@@ -23,6 +23,8 @@ go_library(
"//internal/attestation/variant",
"//internal/cloud/azureshared",
"//internal/cloud/cloudprovider",
+ "//internal/cloud/openstack",
+ "//internal/cloud/openstack/clouds",
"//internal/compatibility",
"//internal/config",
"//internal/constants",
@@ -30,6 +32,7 @@ go_library(
"//internal/constellation/helm",
"//internal/constellation/kubecmd",
"//internal/constellation/state",
+ "//internal/file",
"//internal/grpc/dialer",
"//internal/imagefetcher",
"//internal/kms/uri",
@@ -53,6 +56,7 @@ go_library(
"@com_github_hashicorp_terraform_plugin_framework//types/basetypes",
"@com_github_hashicorp_terraform_plugin_framework_validators//stringvalidator",
"@com_github_hashicorp_terraform_plugin_log//tflog",
+ "@com_github_spf13_afero//:afero",
],
)
diff --git a/terraform-provider-constellation/internal/provider/attestation_data_source_test.go b/terraform-provider-constellation/internal/provider/attestation_data_source_test.go
index 3d8e7342e..4fed9fbe3 100644
--- a/terraform-provider-constellation/internal/provider/attestation_data_source_test.go
+++ b/terraform-provider-constellation/internal/provider/attestation_data_source_test.go
@@ -110,6 +110,58 @@ func TestAccAttestationSource(t *testing.T) {
},
},
},
+ "STACKIT qemu-vtpm success": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: testingConfig + `
+ data "constellation_attestation" "test" {
+ csp = "stackit"
+ attestation_variant = "qemu-vtpm"
+ image = {
+ version = "v2.13.0"
+ reference = "v2.13.0"
+ short_path = "v2.13.0"
+ }
+ }
+ `,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.variant", "qemu-vtpm"),
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.bootloader_version", "0"), // since this is not supported on STACKIT, we expect 0
+
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.15.expected", "0000000000000000000000000000000000000000000000000000000000000000"),
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.15.warn_only", "false"),
+ ),
+ },
+ },
+ },
+ "openstack qemu-vtpm success": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: testingConfig + `
+ data "constellation_attestation" "test" {
+ csp = "openstack"
+ attestation_variant = "qemu-vtpm"
+ image = {
+ version = "v2.13.0"
+ reference = "v2.13.0"
+ short_path = "v2.13.0"
+ }
+ }
+ `,
+ Check: resource.ComposeAggregateTestCheckFunc(
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.variant", "qemu-vtpm"),
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.bootloader_version", "0"), // since this is not supported on OpenStack, we expect 0
+
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.15.expected", "0000000000000000000000000000000000000000000000000000000000000000"),
+ resource.TestCheckResourceAttr("data.constellation_attestation.test", "attestation.measurements.15.warn_only", "false"),
+ ),
+ },
+ },
+ },
}
for name, tc := range testCases {
diff --git a/terraform-provider-constellation/internal/provider/cluster_resource.go b/terraform-provider-constellation/internal/provider/cluster_resource.go
index 9f51aa848..a12fe38da 100644
--- a/terraform-provider-constellation/internal/provider/cluster_resource.go
+++ b/terraform-provider-constellation/internal/provider/cluster_resource.go
@@ -26,6 +26,8 @@ import (
"github.com/edgelesssys/constellation/v2/internal/attestation/variant"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
+ openstackshared "github.com/edgelesssys/constellation/v2/internal/cloud/openstack"
+ "github.com/edgelesssys/constellation/v2/internal/cloud/openstack/clouds"
"github.com/edgelesssys/constellation/v2/internal/compatibility"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/constants"
@@ -33,6 +35,7 @@ import (
"github.com/edgelesssys/constellation/v2/internal/constellation/helm"
"github.com/edgelesssys/constellation/v2/internal/constellation/kubecmd"
"github.com/edgelesssys/constellation/v2/internal/constellation/state"
+ "github.com/edgelesssys/constellation/v2/internal/file"
"github.com/edgelesssys/constellation/v2/internal/grpc/dialer"
"github.com/edgelesssys/constellation/v2/internal/kms/uri"
"github.com/edgelesssys/constellation/v2/internal/license"
@@ -50,6 +53,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
"github.com/hashicorp/terraform-plugin-log/tflog"
+ "github.com/spf13/afero"
)
var (
@@ -96,6 +100,7 @@ type ClusterResourceModel struct {
Attestation types.Object `tfsdk:"attestation"`
GCP types.Object `tfsdk:"gcp"`
Azure types.Object `tfsdk:"azure"`
+ OpenStack types.Object `tfsdk:"openstack"`
OwnerID types.String `tfsdk:"owner_id"`
ClusterID types.String `tfsdk:"cluster_id"`
@@ -129,6 +134,17 @@ type azureAttribute struct {
LoadBalancerName string `tfsdk:"load_balancer_name"`
}
+type openStackAttribute struct {
+ Cloud string `tfsdk:"cloud"`
+ CloudsYAMLPath string `tfsdk:"clouds_yaml_path"`
+ FloatingIPPoolID string `tfsdk:"floating_ip_pool_id"`
+ DeployYawolLoadBalancer bool `tfsdk:"deploy_yawol_load_balancer"`
+ YawolImageID string `tfsdk:"yawol_image_id"`
+ YawolFlavorID string `tfsdk:"yawol_flavor_id"`
+ NetworkID string `tfsdk:"network_id"`
+ SubnetID string `tfsdk:"subnet_id"`
+}
+
// extraMicroservicesAttribute is the extra microservices attribute's data model.
type extraMicroservicesAttribute struct {
CSIDriver bool `tfsdk:"csi_driver"`
@@ -333,6 +349,53 @@ func (r *ClusterResource) Schema(_ context.Context, _ resource.SchemaRequest, re
},
},
},
+ "openstack": schema.SingleNestedAttribute{
+ MarkdownDescription: "OpenStack-specific configuration.",
+ Description: "OpenStack-specific configuration.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "cloud": schema.StringAttribute{
+ MarkdownDescription: "Name of the cloud in the clouds.yaml file.",
+ Description: "Name of the cloud in the clouds.yaml file.",
+ Required: true,
+ },
+ "clouds_yaml_path": schema.StringAttribute{
+ MarkdownDescription: "Path to the clouds.yaml file.",
+ Description: "Path to the clouds.yaml file.",
+ Optional: true,
+ },
+ "floating_ip_pool_id": schema.StringAttribute{
+ MarkdownDescription: "Floating IP pool to use for the VMs.",
+ Description: "Floating IP pool to use for the VMs.",
+ Required: true,
+ },
+ "deploy_yawol_load_balancer": schema.BoolAttribute{
+ MarkdownDescription: "Whether to deploy a YAWOL load balancer.",
+ Description: "Whether to deploy a YAWOL load balancer.",
+ Optional: true,
+ },
+ "yawol_image_id": schema.StringAttribute{
+ MarkdownDescription: "OpenStack OS image used by the yawollet.",
+ Description: "OpenStack OS image used by the yawollet.",
+ Optional: true,
+ },
+ "yawol_flavor_id": schema.StringAttribute{
+ MarkdownDescription: "OpenStack flavor used by the yawollet.",
+ Description: "OpenStack flavor used by the yawollet.",
+ Optional: true,
+ },
+ "network_id": schema.StringAttribute{
+ MarkdownDescription: "OpenStack network ID to use for the VMs.",
+ Description: "OpenStack network ID to use for the VMs.",
+ Required: true,
+ },
+ "subnet_id": schema.StringAttribute{
+ MarkdownDescription: "OpenStack subnet ID to use for the VMs.",
+ Description: "OpenStack subnet ID to use for the VMs.",
+ Required: true,
+ },
+ },
+ },
// Computed (output) attributes
"owner_id": schema.StringAttribute{
@@ -406,6 +469,26 @@ func (r *ClusterResource) ValidateConfig(ctx context.Context, req resource.Valid
"GCP configuration not allowed", "When csp is not set to 'gcp', setting the 'gcp' configuration has no effect.",
)
}
+
+ // OpenStack Config is required for OpenStack
+ if (strings.EqualFold(data.CSP.ValueString(), cloudprovider.OpenStack.String()) ||
+ strings.EqualFold(data.CSP.ValueString(), "stackit")) &&
+ data.OpenStack.IsNull() {
+ resp.Diagnostics.AddAttributeError(
+ path.Root("openstack"),
+ "OpenStack configuration missing", "When csp is set to 'openstack' or 'stackit', the 'openstack' configuration must be set.",
+ )
+ }
+
+ // OpenStack Config should not be set for other CSPs
+ if !strings.EqualFold(data.CSP.ValueString(), cloudprovider.OpenStack.String()) &&
+ !strings.EqualFold(data.CSP.ValueString(), "stackit") &&
+ !data.OpenStack.IsNull() {
+ resp.Diagnostics.AddAttributeWarning(
+ path.Root("openstack"),
+ "OpenStack configuration not allowed", "When csp is not set to 'openstack' or 'stackit', setting the 'openstack' configuration has no effect.",
+ )
+ }
}
// Configure configures the resource.
@@ -447,28 +530,31 @@ func (r *ClusterResource) ModifyPlan(ctx context.Context, req resource.ModifyPla
return
}
- licenseID := plannedState.LicenseID.ValueString()
- if licenseID == "" {
- resp.Diagnostics.AddWarning("Constellation license ID not set.",
- "Continuing with community license.")
- }
- if licenseID == license.CommunityLicense {
- resp.Diagnostics.AddWarning("Using community license.",
- "For details, see https://docs.edgeless.systems/constellation/overview/license")
- }
-
// Validate during plan. Must be done in ModifyPlan to read provider data.
// See https://developer.hashicorp.com/terraform/plugin/framework/resources/configure#define-resource-configure-method.
_, diags := r.getMicroserviceVersion(&plannedState)
resp.Diagnostics.Append(diags...)
- _, _, diags = r.getImageVersion(ctx, &plannedState)
+ var image imageAttribute
+ image, _, diags = r.getImageVersion(ctx, &plannedState)
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
return
}
+ licenseID := plannedState.LicenseID.ValueString()
+ switch {
+ case image.MarketplaceImage != nil && *image.MarketplaceImage:
+ // Marketplace images do not require a license.
+ case licenseID == "":
+ resp.Diagnostics.AddWarning("Constellation license ID not set.",
+ "Continuing with community license.")
+ case licenseID == license.CommunityLicense:
+ resp.Diagnostics.AddWarning("Using community license.",
+ "For details, see https://docs.edgeless.systems/constellation/overview/license")
+ }
+
// Checks running on updates to the resource. (i.e. state and plan != nil)
if !req.State.Raw.IsNull() {
// Read currentState supplied by Terraform runtime into the model
@@ -759,9 +845,13 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
// parse license ID
licenseID := data.LicenseID.ValueString()
- if licenseID == "" {
+ switch {
+ case image.MarketplaceImage != nil && *image.MarketplaceImage:
+ licenseID = license.MarketplaceLicense
+ case licenseID == "":
licenseID = license.CommunityLicense
}
+
// license ID can be base64-encoded
licenseIDFromB64, err := base64.StdEncoding.DecodeString(licenseID)
if err == nil {
@@ -772,6 +862,7 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
serviceAccPayload := constellation.ServiceAccountPayload{}
var gcpConfig gcpAttribute
var azureConfig azureAttribute
+ var openStackConfig openStackAttribute
switch csp {
case cloudprovider.GCP:
convertDiags = data.GCP.As(ctx, &gcpConfig, basetypes.ObjectAsOptions{})
@@ -808,6 +899,33 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
PreferredAuthMethod: azureshared.AuthMethodUserAssignedIdentity,
UamiResourceID: azureConfig.UamiResourceID,
}
+ case cloudprovider.OpenStack:
+ convertDiags = data.OpenStack.As(ctx, &openStackConfig, basetypes.ObjectAsOptions{})
+ diags.Append(convertDiags...)
+ if diags.HasError() {
+ return diags
+ }
+ cloudsYAML, err := clouds.ReadCloudsYAML(file.NewHandler(afero.NewOsFs()), openStackConfig.CloudsYAMLPath)
+ if err != nil {
+ diags.AddError("Reading clouds.yaml", err.Error())
+ return diags
+ }
+ cloud, ok := cloudsYAML.Clouds[openStackConfig.Cloud]
+ if !ok {
+ diags.AddError("Reading clouds.yaml", fmt.Sprintf("Cloud %s not found in clouds.yaml", openStackConfig.Cloud))
+ return diags
+ }
+ serviceAccPayload.OpenStack = openstackshared.AccountKey{
+ AuthURL: cloud.AuthInfo.AuthURL,
+ Username: cloud.AuthInfo.Username,
+ Password: cloud.AuthInfo.Password,
+ ProjectID: cloud.AuthInfo.ProjectID,
+ ProjectName: cloud.AuthInfo.ProjectName,
+ UserDomainName: cloud.AuthInfo.UserDomainName,
+ ProjectDomainName: cloud.AuthInfo.ProjectDomainName,
+ RegionName: cloud.RegionName,
+ }
+
}
serviceAccURI, err := constellation.MarshalServiceAccountURI(csp, serviceAccPayload)
if err != nil {
@@ -854,6 +972,11 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
ProjectID: gcpConfig.ProjectID,
IPCidrPod: networkCfg.IPCidrPod.ValueString(),
}
+ case cloudprovider.OpenStack:
+ stateFile.Infrastructure.OpenStack = &state.OpenStack{
+ NetworkID: openStackConfig.NetworkID,
+ SubnetID: openStackConfig.SubnetID,
+ }
}
// Check license
@@ -930,6 +1053,14 @@ func (r *ClusterResource) apply(ctx context.Context, data *ClusterResourceModel,
masterSecret: secrets.masterSecret,
serviceAccURI: serviceAccURI,
}
+ if csp == cloudprovider.OpenStack {
+ payload.openStackHelmValues = &helm.OpenStackValues{
+ DeployYawolLoadBalancer: openStackConfig.DeployYawolLoadBalancer,
+ FloatingIPPoolID: openStackConfig.FloatingIPPoolID,
+ YawolImageID: openStackConfig.YawolImageID,
+ YawolFlavorID: openStackConfig.YawolFlavorID,
+ }
+ }
helmDiags := r.applyHelmCharts(ctx, applier, payload, stateFile)
diags.Append(helmDiags...)
if diags.HasError() {
@@ -1056,6 +1187,7 @@ type applyHelmChartsPayload struct {
DeployCSIDriver bool // Whether to deploy the CSI driver.
masterSecret uri.MasterSecret // master secret of the cluster.
serviceAccURI string // URI of the service account used within the cluster.
+ openStackHelmValues *helm.OpenStackValues // OpenStack-specific Helm values.
}
// applyHelmCharts applies the Helm charts to the cluster.
@@ -1076,10 +1208,11 @@ func (r *ClusterResource) applyHelmCharts(ctx context.Context, applier *constell
// Allow destructive changes to the cluster.
// The user has previously been warned about this when planning a microservice version change.
AllowDestructive: helm.AllowDestructive,
+ OpenStackValues: payload.openStackHelmValues,
}
executor, _, err := applier.PrepareHelmCharts(options, state,
- payload.serviceAccURI, payload.masterSecret, nil)
+ payload.serviceAccURI, payload.masterSecret)
var upgradeErr *compatibility.InvalidUpgradeError
if err != nil {
if !errors.As(err, &upgradeErr) {
diff --git a/terraform-provider-constellation/internal/provider/cluster_resource_test.go b/terraform-provider-constellation/internal/provider/cluster_resource_test.go
index 9cc197bb5..fb1b5c4fc 100644
--- a/terraform-provider-constellation/internal/provider/cluster_resource_test.go
+++ b/terraform-provider-constellation/internal/provider/cluster_resource_test.go
@@ -97,9 +97,10 @@ func TestViolatedImageConstraint(t *testing.T) {
}
input, diags := basetypes.NewObjectValueFrom(context.Background(), map[string]attr.Type{
- "version": basetypes.StringType{},
- "reference": basetypes.StringType{},
- "short_path": basetypes.StringType{},
+ "version": basetypes.StringType{},
+ "reference": basetypes.StringType{},
+ "short_path": basetypes.StringType{},
+ "marketplace_image": basetypes.BoolType{},
}, img)
require.Equal(t, 0, diags.ErrorsCount())
_, _, diags2 := sut.getImageVersion(context.Background(), &ClusterResourceModel{
@@ -488,6 +489,68 @@ func TestAccClusterResource(t *testing.T) {
},
},
},
+ "stackit config missing": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactoriesWithVersion(providerVersion),
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: fullClusterTestingConfig(t, "openstack") + fmt.Sprintf(`
+ resource "constellation_cluster" "test" {
+ csp = "stackit"
+ name = "constell"
+ uid = "test"
+ image = data.constellation_image.bar.image
+ attestation = data.constellation_attestation.foo.attestation
+ init_secret = "deadbeef"
+ master_secret = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ master_secret_salt = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ measurement_salt = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ out_of_cluster_endpoint = "192.0.2.1"
+ in_cluster_endpoint = "192.0.2.1"
+ network_config = {
+ ip_cidr_node = "0.0.0.0/24"
+ ip_cidr_service = "0.0.0.0/24"
+ ip_cidr_pod = "0.0.0.0/24"
+ }
+ kubernetes_version = "%s"
+ constellation_microservice_version = "%s"
+ }
+ `, versions.Default, providerVersion),
+ ExpectError: regexp.MustCompile(".*When csp is set to 'openstack' or 'stackit', the 'openstack' configuration\nmust be set.*"),
+ },
+ },
+ },
+ "openstack config missing": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactoriesWithVersion(providerVersion),
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: fullClusterTestingConfig(t, "openstack") + fmt.Sprintf(`
+ resource "constellation_cluster" "test" {
+ csp = "openstack"
+ name = "constell"
+ uid = "test"
+ image = data.constellation_image.bar.image
+ attestation = data.constellation_attestation.foo.attestation
+ init_secret = "deadbeef"
+ master_secret = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ master_secret_salt = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ measurement_salt = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
+ out_of_cluster_endpoint = "192.0.2.1"
+ in_cluster_endpoint = "192.0.2.1"
+ network_config = {
+ ip_cidr_node = "0.0.0.0/24"
+ ip_cidr_service = "0.0.0.0/24"
+ ip_cidr_pod = "0.0.0.0/24"
+ }
+ kubernetes_version = "%s"
+ constellation_microservice_version = "%s"
+ }
+ `, versions.Default, providerVersion),
+ ExpectError: regexp.MustCompile(".*When csp is set to 'openstack' or 'stackit', the 'openstack' configuration\nmust be set.*"),
+ },
+ },
+ },
}
for name, tc := range testCases {
@@ -546,6 +609,19 @@ func fullClusterTestingConfig(t *testing.T, csp string) string {
attestation_variant = "gcp-sev-es"
image = data.constellation_image.bar.image
}`, image)
+ case "openstack":
+ return providerConfig + fmt.Sprintf(`
+ data "constellation_image" "bar" {
+ version = "%s"
+ attestation_variant = "qemu-vtpm"
+ csp = "openstack"
+ }
+
+ data "constellation_attestation" "foo" {
+ csp = "openstack"
+ attestation_variant = "qemu-vtpm"
+ image = data.constellation_image.bar.image
+ }`, image)
default:
t.Fatal("unknown csp")
return ""
diff --git a/terraform-provider-constellation/internal/provider/convert.go b/terraform-provider-constellation/internal/provider/convert.go
index 552bdcdd2..087728168 100644
--- a/terraform-provider-constellation/internal/provider/convert.go
+++ b/terraform-provider-constellation/internal/provider/convert.go
@@ -122,6 +122,10 @@ func convertFromTfAttestationCfg(tfAttestation attestationAttribute, attestation
attestationConfig = &config.GCPSEVES{
Measurements: c11nMeasurements,
}
+ case variant.QEMUVTPM{}:
+ attestationConfig = &config.QEMUVTPM{
+ Measurements: c11nMeasurements,
+ }
default:
return nil, fmt.Errorf("unknown attestation variant: %s", attestationVariant)
}
@@ -177,7 +181,7 @@ func convertToTfAttestation(attVar variant.Variant, snpVersions attestationconfi
XFAM: hex.EncodeToString(tdxCfg.XFAM),
}
tfAttestation.TDX = tfTdxCfg
- case variant.GCPSEVES{}:
+ case variant.GCPSEVES{}, variant.QEMUVTPM{}:
// no additional fields
default:
return tfAttestation, fmt.Errorf("unknown attestation variant: %s", attVar)
diff --git a/terraform-provider-constellation/internal/provider/image_data_source.go b/terraform-provider-constellation/internal/provider/image_data_source.go
index 5e97bdcb4..6ed11c363 100644
--- a/terraform-provider-constellation/internal/provider/image_data_source.go
+++ b/terraform-provider-constellation/internal/provider/image_data_source.go
@@ -252,9 +252,10 @@ func (d *ImageDataSource) Read(ctx context.Context, req datasource.ReadRequest,
// Save data into Terraform state
diags := resp.State.SetAttribute(ctx, path.Root("image"), imageAttribute{
- Reference: imageRef,
- Version: imageSemver,
- ShortPath: apiCompatibleVer.ShortPath(),
+ Reference: imageRef,
+ Version: imageSemver,
+ ShortPath: apiCompatibleVer.ShortPath(),
+ MarketplaceImage: data.MarketplaceImage.ValueBoolPointer(),
})
resp.Diagnostics.Append(diags...)
if resp.Diagnostics.HasError() {
diff --git a/terraform-provider-constellation/internal/provider/image_data_source_test.go b/terraform-provider-constellation/internal/provider/image_data_source_test.go
index 787b7aacf..669899e39 100644
--- a/terraform-provider-constellation/internal/provider/image_data_source_test.go
+++ b/terraform-provider-constellation/internal/provider/image_data_source_test.go
@@ -141,6 +141,38 @@ func TestAccImageDataSource(t *testing.T) {
},
},
},
+ "stackit success": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: testingConfig + `
+ data "constellation_image" "test" {
+ version = "v2.16.0"
+ attestation_variant = "qemu-vtpm"
+ csp = "stackit"
+ }
+ `,
+ Check: resource.TestCheckResourceAttr("data.constellation_image.test", "image.reference", "8ffc1740-1e41-4281-b872-f8088ffd7692"), // should be immutable,
+ },
+ },
+ },
+ "openstack success": {
+ ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
+ PreCheck: bazelPreCheck,
+ Steps: []resource.TestStep{
+ {
+ Config: testingConfig + `
+ data "constellation_image" "test" {
+ version = "v2.16.0"
+ attestation_variant = "qemu-vtpm"
+ csp = "openstack"
+ }
+ `,
+ Check: resource.TestCheckResourceAttr("data.constellation_image.test", "image.reference", "8ffc1740-1e41-4281-b872-f8088ffd7692"), // should be immutable,
+ },
+ },
+ },
"unknown attestation variant": {
ProtoV6ProviderFactories: testAccProtoV6ProviderFactories,
PreCheck: bazelPreCheck,
diff --git a/terraform-provider-constellation/internal/provider/shared_attributes.go b/terraform-provider-constellation/internal/provider/shared_attributes.go
index 79535a53c..b6f96cd17 100644
--- a/terraform-provider-constellation/internal/provider/shared_attributes.go
+++ b/terraform-provider-constellation/internal/provider/shared_attributes.go
@@ -31,11 +31,12 @@ func newAttestationVariantAttributeSchema(t attributeType) schema.Attribute {
" * `aws-nitro-tpm`\n" +
" * `azure-sev-snp`\n" +
" * `azure-tdx`\n" +
- " * `gcp-sev-es`\n",
+ " * `gcp-sev-es`\n" +
+ " * `qemu-vtpm`\n",
Required: isInput,
Computed: !isInput,
Validators: []validator.String{
- stringvalidator.OneOf("aws-sev-snp", "aws-nitro-tpm", "azure-sev-snp", "azure-tdx", "gcp-sev-es"),
+ stringvalidator.OneOf("aws-sev-snp", "aws-nitro-tpm", "azure-sev-snp", "azure-tdx", "gcp-sev-es", "qemu-vtpm"),
},
}
}
@@ -47,7 +48,7 @@ func newCSPAttributeSchema() schema.Attribute {
"See the [full list of CSPs](https://docs.edgeless.systems/constellation/overview/clouds) that Constellation supports.",
Required: true,
Validators: []validator.String{
- stringvalidator.OneOf("aws", "azure", "gcp"),
+ stringvalidator.OneOf("aws", "azure", "gcp", "openstack", "stackit"),
},
}
}
@@ -229,13 +230,19 @@ func newImageAttributeSchema(t attributeType) schema.Attribute {
Computed: !isInput,
Required: isInput,
},
+ "marketplace_image": schema.BoolAttribute{
+ Description: "Whether a marketplace image should be used.",
+ MarkdownDescription: "Whether a marketplace image should be used.",
+ Optional: true,
+ },
},
}
}
// imageAttribute is the image attribute's data model.
type imageAttribute struct {
- Reference string `tfsdk:"reference"`
- Version string `tfsdk:"version"`
- ShortPath string `tfsdk:"short_path"`
+ Reference string `tfsdk:"reference"`
+ Version string `tfsdk:"version"`
+ ShortPath string `tfsdk:"short_path"`
+ MarketplaceImage *bool `tfsdk:"marketplace_image"`
}
diff --git a/terraform/BUILD.bazel b/terraform/BUILD.bazel
index f4f99f242..88e71216d 100644
--- a/terraform/BUILD.bazel
+++ b/terraform/BUILD.bazel
@@ -77,6 +77,7 @@ go_library(
"infrastructure/aws/modules/public_private_subnet/output.tf",
"infrastructure/openstack/modules/stackit_loadbalancer/main.tf",
"infrastructure/openstack/modules/stackit_loadbalancer/variables.tf",
+ "infrastructure/iam/aws/alb_policy.json",
],
importpath = "github.com/edgelesssys/constellation/v2/terraform",
visibility = ["//visibility:public"],
diff --git a/terraform/infrastructure/iam/aws/alb_policy.json b/terraform/infrastructure/iam/aws/alb_policy.json
new file mode 100644
index 000000000..e8a05f8e6
--- /dev/null
+++ b/terraform/infrastructure/iam/aws/alb_policy.json
@@ -0,0 +1,242 @@
+{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": [
+ "iam:CreateServiceLinkedRole"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "StringEquals": {
+ "iam:AWSServiceName": "elasticloadbalancing.amazonaws.com"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:DescribeAccountAttributes",
+ "ec2:DescribeAddresses",
+ "ec2:DescribeAvailabilityZones",
+ "ec2:DescribeInternetGateways",
+ "ec2:DescribeVpcs",
+ "ec2:DescribeVpcPeeringConnections",
+ "ec2:DescribeSubnets",
+ "ec2:DescribeSecurityGroups",
+ "ec2:DescribeInstances",
+ "ec2:DescribeNetworkInterfaces",
+ "ec2:DescribeTags",
+ "ec2:GetCoipPoolUsage",
+ "ec2:DescribeCoipPools",
+ "elasticloadbalancing:DescribeLoadBalancers",
+ "elasticloadbalancing:DescribeLoadBalancerAttributes",
+ "elasticloadbalancing:DescribeListeners",
+ "elasticloadbalancing:DescribeListenerCertificates",
+ "elasticloadbalancing:DescribeSSLPolicies",
+ "elasticloadbalancing:DescribeRules",
+ "elasticloadbalancing:DescribeTargetGroups",
+ "elasticloadbalancing:DescribeTargetGroupAttributes",
+ "elasticloadbalancing:DescribeTargetHealth",
+ "elasticloadbalancing:DescribeTags",
+ "elasticloadbalancing:DescribeTrustStores"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "cognito-idp:DescribeUserPoolClient",
+ "acm:ListCertificates",
+ "acm:DescribeCertificate",
+ "iam:ListServerCertificates",
+ "iam:GetServerCertificate",
+ "waf-regional:GetWebACL",
+ "waf-regional:GetWebACLForResource",
+ "waf-regional:AssociateWebACL",
+ "waf-regional:DisassociateWebACL",
+ "wafv2:GetWebACL",
+ "wafv2:GetWebACLForResource",
+ "wafv2:AssociateWebACL",
+ "wafv2:DisassociateWebACL",
+ "shield:GetSubscriptionState",
+ "shield:DescribeProtection",
+ "shield:CreateProtection",
+ "shield:DeleteProtection"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:RevokeSecurityGroupIngress"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateSecurityGroup"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateTags"
+ ],
+ "Resource": "arn:aws:ec2:*:*:security-group/*",
+ "Condition": {
+ "StringEquals": {
+ "ec2:CreateAction": "CreateSecurityGroup"
+ },
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:CreateTags",
+ "ec2:DeleteTags"
+ ],
+ "Resource": "arn:aws:ec2:*:*:security-group/*",
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "ec2:AuthorizeSecurityGroupIngress",
+ "ec2:RevokeSecurityGroupIngress",
+ "ec2:DeleteSecurityGroup"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Null": {
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:CreateLoadBalancer",
+ "elasticloadbalancing:CreateTargetGroup"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:CreateListener",
+ "elasticloadbalancing:DeleteListener",
+ "elasticloadbalancing:CreateRule",
+ "elasticloadbalancing:DeleteRule"
+ ],
+ "Resource": "*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:RemoveTags"
+ ],
+ "Resource": [
+ "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*",
+ "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*",
+ "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*"
+ ],
+ "Condition": {
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "true",
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:AddTags",
+ "elasticloadbalancing:RemoveTags"
+ ],
+ "Resource": [
+ "arn:aws:elasticloadbalancing:*:*:listener/net/*/*/*",
+ "arn:aws:elasticloadbalancing:*:*:listener/app/*/*/*",
+ "arn:aws:elasticloadbalancing:*:*:listener-rule/net/*/*/*",
+ "arn:aws:elasticloadbalancing:*:*:listener-rule/app/*/*/*"
+ ]
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:ModifyLoadBalancerAttributes",
+ "elasticloadbalancing:SetIpAddressType",
+ "elasticloadbalancing:SetSecurityGroups",
+ "elasticloadbalancing:SetSubnets",
+ "elasticloadbalancing:DeleteLoadBalancer",
+ "elasticloadbalancing:ModifyTargetGroup",
+ "elasticloadbalancing:ModifyTargetGroupAttributes",
+ "elasticloadbalancing:DeleteTargetGroup"
+ ],
+ "Resource": "*",
+ "Condition": {
+ "Null": {
+ "aws:ResourceTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:AddTags"
+ ],
+ "Resource": [
+ "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*",
+ "arn:aws:elasticloadbalancing:*:*:loadbalancer/net/*/*",
+ "arn:aws:elasticloadbalancing:*:*:loadbalancer/app/*/*"
+ ],
+ "Condition": {
+ "StringEquals": {
+ "elasticloadbalancing:CreateAction": [
+ "CreateTargetGroup",
+ "CreateLoadBalancer"
+ ]
+ },
+ "Null": {
+ "aws:RequestTag/elbv2.k8s.aws/cluster": "false"
+ }
+ }
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:RegisterTargets",
+ "elasticloadbalancing:DeregisterTargets"
+ ],
+ "Resource": "arn:aws:elasticloadbalancing:*:*:targetgroup/*/*"
+ },
+ {
+ "Effect": "Allow",
+ "Action": [
+ "elasticloadbalancing:SetWebAcl",
+ "elasticloadbalancing:ModifyListener",
+ "elasticloadbalancing:AddListenerCertificates",
+ "elasticloadbalancing:RemoveListenerCertificates",
+ "elasticloadbalancing:ModifyRule"
+ ],
+ "Resource": "*"
+ }
+ ]
+}
diff --git a/terraform/infrastructure/iam/aws/main.tf b/terraform/infrastructure/iam/aws/main.tf
index 2394841b3..c2dbf7c20 100644
--- a/terraform/infrastructure/iam/aws/main.tf
+++ b/terraform/infrastructure/iam/aws/main.tf
@@ -242,3 +242,20 @@ resource "aws_iam_role_policy_attachment" "csi_driver_policy_control_plane" {
role = aws_iam_role.control_plane_role.name
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy"
}
+
+// This policy is required by the AWS load balancer controller and can be found at
+// https://github.com/kubernetes-sigs/aws-load-balancer-controller/blob/b44633a/docs/install/iam_policy.json.
+resource "aws_iam_policy" "lb_policy" {
+ name = "${var.name_prefix}_lb_policy"
+ policy = file("${path.module}/alb_policy.json")
+}
+
+resource "aws_iam_role_policy_attachment" "attach_lb_policy_worker" {
+ role = aws_iam_role.worker_node_role.name
+ policy_arn = aws_iam_policy.lb_policy.arn
+}
+
+resource "aws_iam_role_policy_attachment" "attach_lb_policy_control_plane" {
+ role = aws_iam_role.control_plane_role.name
+ policy_arn = aws_iam_policy.lb_policy.arn
+}
diff --git a/terraform/infrastructure/openstack/.terraform.lock.hcl b/terraform/infrastructure/openstack/.terraform.lock.hcl
index 6f96f0f72..f33d99a2a 100644
--- a/terraform/infrastructure/openstack/.terraform.lock.hcl
+++ b/terraform/infrastructure/openstack/.terraform.lock.hcl
@@ -26,29 +26,29 @@ provider "registry.terraform.io/hashicorp/random" {
}
provider "registry.terraform.io/stackitcloud/stackit" {
- version = "0.12.0"
- constraints = "0.12.0"
+ version = "0.17.0"
+ constraints = "0.17.0"
hashes = [
- "h1:08k0ihJixjWGyzNF0wdMiOckr+4qfBi50yj4tTLsbMM=",
- "h1:8wtUYCXZke9uJiWp3Y7/tRy84UM0TjOzrzhb6BAX5vo=",
- "h1:EwUqtQ7b/ShFcNvBMiemsbrvqBwFfkIRtnEIeIisKSA=",
- "h1:lPXt86IQA6bHnX6o6xIaOUHqbAs6WHAehwtS1kK3wcg=",
- "h1:t+pHh9fQCS+4Rq9STVs+npH3DOe7qp1L0rJfbMjAdjM=",
+ "h1:I0ZMrepgLZz8LXV0gPU0AFoCuRUTlObilWeqXdu+EWY=",
+ "h1:Oft1EtSh7XSVS2Ypv2XxQ1fqUwkX8uKQNbNUY4Z2I1g=",
+ "h1:VDaLrd0U5udRiFg0oLdhJEn1HOvDeTaixs1al/5ad+g=",
+ "h1:aaCb+bQiK0DR/Fm3d88fBIaNjA0mu6qtY+psGRq0rIc=",
+ "h1:clv7CxYSYaIky+0fM9tCySAYStP1dWLiL8cV/3bQIUc=",
+ "zh:0a4c9388a0ebe806ade3d0489924d84880611c36b879cfc97088e871c6fdc9a8",
"zh:0dde99e7b343fa01f8eefc378171fb8621bedb20f59157d6cc8e3d46c738105f",
- "zh:13ff6111adb804e3e7a33a0e8e341e494a84a81115b144c950ea9864ce12efdb",
- "zh:2b13aff4a4879b833e27d215102c98809fe78d9a1fb33d09ec352760d21fa7c3",
- "zh:6562b6ca55bebd7e425fba60ba5683a3cb00d49d50883e37f418b5be8d52d992",
- "zh:6ce745a9a2fac88fd7b219dca1d70882e3c1b573e2d27a49de0a04b76ceabdf0",
- "zh:70dd57f2e59596f697aaeab377423a041a57e066d1ad8bbfc0ace9cfaf6e9e0d",
- "zh:7bb24a57ef0d802c62d23249078d86a0daeba29b7508d46bb8d104c5b820f35b",
- "zh:93b57ec66d0f18ef616416f9d39a5a5b45dde604145b66e5184f00840db7a981",
- "zh:9646f12a59a3eab161040eee68093b4c55864c865d544fa83d0e56bfbc59c174",
- "zh:c23b3433b81eb99e314239add0df206a5388ef79884e924537bf09d4374815a8",
- "zh:d2ef1946a5d559a72dac15a38a78f8d2d09bcd13068d9fe1debe7ae82e9c527d",
- "zh:d63299ca4bf158573706a0c313dbee0aa79c7b910d85a0a748ba77620f533a5d",
- "zh:e796aec8e1c64c7142d1b2877794ff8cb6fc5699292dfea102f2f229375626a2",
- "zh:eb4003be226dc810004cd6a50d98f872d61bb49f2891a2966247a245c9d7cc1c",
- "zh:f62e5390fca4d920c3db329276e1780ae57cc20aa666ee549dcf452d4f839ba5",
+ "zh:1cefebb92afefbc2407826d2ebe62291c15d585b391fe0a9f329b0d08eacd1b4",
+ "zh:248d0980ce0f34a8dcc37d3717d9cf1c85e05c944ca228e9d7bc330af5c3ec6e",
+ "zh:3161dd1973b5e2359402fd8a1838c881a9e1c514507b22440bc47b557d1a2574",
+ "zh:340a86a8a93e343702a87db423c493a1ab36a0a32e9c10e8c1c982020caae9bc",
+ "zh:38b49523eb1f48f5f4a0f1ef4b7de81bb468d3378e74dd72038f7228a4f3a5cb",
+ "zh:3b28a45818891bfabae5a52a43d38125ac7c220ad7a7c0a4682228be5552f14d",
+ "zh:59889ef8c6c84e9c2d647e5374ab1e59e043fbe5750eca7d06efcff308e2e403",
+ "zh:5baed6b0b8a9a1ca973b0bc792f56e02d46ffafe276c3b50b75570dd024f2afb",
+ "zh:81628b9cf966b2688ee3598868336c87e97b86d69c5caf761a6e77fde6eeefc1",
+ "zh:9a12f81c92f2a58340920dd468feca988f1bd3b8fe57ec8914d2f1db3ee363f7",
+ "zh:b7af6c4f1491a1e6c58148f8b6c1d5f0f77b13560d43fa5a70faa8ae09bde1f0",
+ "zh:d378cc2f87f84f91b06910be7fb9cf23a147d019be218f4f686f2441bdf0b32a",
+ "zh:f3cba27b8375d7e97151bf9f184864f06724895d0d9457c2ac3615ff175d7413",
]
}
diff --git a/terraform/infrastructure/openstack/main.tf b/terraform/infrastructure/openstack/main.tf
index 797423654..ff1324dc1 100644
--- a/terraform/infrastructure/openstack/main.tf
+++ b/terraform/infrastructure/openstack/main.tf
@@ -7,7 +7,7 @@ terraform {
stackit = {
source = "stackitcloud/stackit"
- version = "0.12.0"
+ version = "0.17.0"
}
random = {
@@ -56,6 +56,9 @@ locals {
endpoint if(endpoint.interface == "public")
][0]
identity_internal_url = local.identity_endpoint.url
+ cloudsyaml_path = length(var.openstack_clouds_yaml_path) > 0 ? var.openstack_clouds_yaml_path : "~/.config/openstack/clouds.yaml"
+ cloudsyaml = yamldecode(file(pathexpand(local.cloudsyaml_path)))
+ cloudyaml = local.cloudsyaml.clouds[var.cloud]
}
resource "random_id" "uid" {
@@ -236,9 +239,9 @@ module "instance_group" {
subnet_id = openstack_networking_subnet_v2.vpc_subnetwork.id
init_secret_hash = local.init_secret_hash
identity_internal_url = local.identity_internal_url
- openstack_username = var.openstack_username
- openstack_password = var.openstack_password
- openstack_user_domain_name = var.openstack_user_domain_name
+ openstack_username = local.cloudyaml["auth"]["username"]
+ openstack_password = local.cloudyaml["auth"]["password"]
+ openstack_user_domain_name = local.cloudyaml["auth"]["user_domain_name"]
openstack_load_balancer_endpoint = openstack_networking_floatingip_v2.public_ip.address
}
diff --git a/terraform/infrastructure/openstack/modules/instance_group/main.tf b/terraform/infrastructure/openstack/modules/instance_group/main.tf
index 0d2cdc77a..b104506d5 100644
--- a/terraform/infrastructure/openstack/modules/instance_group/main.tf
+++ b/terraform/infrastructure/openstack/modules/instance_group/main.tf
@@ -45,7 +45,6 @@ data "openstack_compute_flavor_v2" "flavor" {
resource "openstack_compute_instance_v2" "instance_group_member" {
name = "${local.name}-${count.index}"
count = var.initial_count
- image_id = var.image_id
flavor_id = data.openstack_compute_flavor_v2.flavor.id
tags = local.tags
# TODO(malt3): get this API enabled in the test environment
@@ -72,14 +71,19 @@ resource "openstack_compute_instance_v2" "instance_group_member" {
delete_on_termination = true
}
metadata = {
- constellation-role = var.role
- constellation-uid = var.uid
- constellation-init-secret-hash = var.init_secret_hash
+ constellation-role = var.role
+ constellation-uid = var.uid
+ constellation-init-secret-hash = var.init_secret_hash
+ }
+ user_data = jsonencode({
openstack-auth-url = var.identity_internal_url
openstack-username = var.openstack_username
openstack-password = var.openstack_password
openstack-user-domain-name = var.openstack_user_domain_name
openstack-load-balancer-endpoint = var.openstack_load_balancer_endpoint
+ })
+ availability_zone_hints = length(var.availability_zone) > 0 ? var.availability_zone : null
+ lifecycle {
+ ignore_changes = [block_device] # block device contains current image, which can be updated from inside the cluster
}
- availability_zone_hints = var.availability_zone
}
diff --git a/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf b/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf
index cbe08c83b..889964ee9 100644
--- a/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf
+++ b/terraform/infrastructure/openstack/modules/stackit_loadbalancer/main.tf
@@ -2,7 +2,7 @@ terraform {
required_providers {
stackit = {
source = "stackitcloud/stackit"
- version = "0.12.0"
+ version = "0.17.0"
}
}
}
diff --git a/terraform/infrastructure/openstack/variables.tf b/terraform/infrastructure/openstack/variables.tf
index 3b0983d68..98714a200 100644
--- a/terraform/infrastructure/openstack/variables.tf
+++ b/terraform/infrastructure/openstack/variables.tf
@@ -48,26 +48,17 @@ variable "cloud" {
description = "Cloud to use within the OpenStack \"clouds.yaml\" file. Optional. If not set, environment variables are used."
}
+variable "openstack_clouds_yaml_path" {
+ type = string
+ default = "~/.config/openstack/clouds.yaml"
+ description = "Path to OpenStack clouds.yaml file"
+}
+
variable "floating_ip_pool_id" {
type = string
description = "Pool (network name) to use for floating IPs."
}
-variable "openstack_user_domain_name" {
- type = string
- description = "OpenStack user domain name."
-}
-
-variable "openstack_username" {
- type = string
- description = "OpenStack user name."
-}
-
-variable "openstack_password" {
- type = string
- description = "OpenStack password."
-}
-
# STACKIT-specific variables
variable "stackit_project_id" {
diff --git a/upgrade-agent/upgradeproto/upgrade.pb.go b/upgrade-agent/upgradeproto/upgrade.pb.go
index faba2c104..a110b3cd0 100644
--- a/upgrade-agent/upgradeproto/upgrade.pb.go
+++ b/upgrade-agent/upgradeproto/upgrade.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: upgrade-agent/upgradeproto/upgrade.proto
diff --git a/verify/verifyproto/verify.pb.go b/verify/verifyproto/verify.pb.go
index 8a4d9fc84..cc121d32f 100644
--- a/verify/verifyproto/verify.pb.go
+++ b/verify/verifyproto/verify.pb.go
@@ -1,6 +1,6 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
+// protoc-gen-go v1.33.0
// protoc v4.22.1
// source: verify/verifyproto/verify.proto
diff --git a/version.txt b/version.txt
index 79fa94a5c..47aff0ca5 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-v2.16.0-pre
+v2.16.4