diff --git a/docs/versioned_docs/version-2.11/_media/SLSA-Badge-full-level3.svg b/docs/versioned_docs/version-2.11/_media/SLSA-Badge-full-level3.svg new file mode 100644 index 000000000..7154d4a13 --- /dev/null +++ b/docs/versioned_docs/version-2.11/_media/SLSA-Badge-full-level3.svg @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_bw.png b/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_bw.png new file mode 100644 index 000000000..a82ebe2d0 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_bw.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_iops.png b/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_iops.png new file mode 100644 index 000000000..1723257a8 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_fio_azure_iops.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_bw.png b/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_bw.png new file mode 100644 index 000000000..4f0ecc94b Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_bw.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_iops.png b/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_iops.png new file mode 100644 index 000000000..571086da2 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_fio_gcp_iops.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_azure.png b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_azure.png new file mode 100644 index 000000000..9130349c7 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_azure.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_gcp.png 
b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_gcp.png new file mode 100644 index 000000000..a41557e96 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2p_gcp.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_azure.png b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_azure.png new file mode 100644 index 000000000..d83e17f5a Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_azure.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_gcp.png b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_gcp.png new file mode 100644 index 000000000..55916a1de Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_net_p2svc_gcp.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/max_latency.png b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/max_latency.png new file mode 100644 index 000000000..696250181 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/max_latency.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/mean_latency.png b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/mean_latency.png new file mode 100644 index 000000000..3b43298ac Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/mean_latency.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/min_latency.png b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/min_latency.png new file mode 100644 index 000000000..1046df67e Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/min_latency.png differ diff --git a/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/p99_latency.png 
b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/p99_latency.png new file mode 100644 index 000000000..0190118b2 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/benchmark_vault/5replicas/p99_latency.png differ diff --git a/docs/versioned_docs/version-2.11/_media/concept-constellation.svg b/docs/versioned_docs/version-2.11/_media/concept-constellation.svg new file mode 100644 index 000000000..caa7f847d --- /dev/null +++ b/docs/versioned_docs/version-2.11/_media/concept-constellation.svg @@ -0,0 +1,657 @@ + + diff --git a/docs/versioned_docs/version-2.11/_media/concept-managed.svg b/docs/versioned_docs/version-2.11/_media/concept-managed.svg new file mode 100644 index 000000000..718412aad --- /dev/null +++ b/docs/versioned_docs/version-2.11/_media/concept-managed.svg @@ -0,0 +1,847 @@ + + diff --git a/docs/versioned_docs/version-2.11/_media/constellation_oneline.svg b/docs/versioned_docs/version-2.11/_media/constellation_oneline.svg new file mode 100644 index 000000000..4e354958a --- /dev/null +++ b/docs/versioned_docs/version-2.11/_media/constellation_oneline.svg @@ -0,0 +1,52 @@ + + + + + + + + diff --git a/docs/versioned_docs/version-2.11/_media/example-emojivoto.jpg b/docs/versioned_docs/version-2.11/_media/example-emojivoto.jpg new file mode 100644 index 000000000..4be0d5b26 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/example-emojivoto.jpg differ diff --git a/docs/versioned_docs/version-2.11/_media/example-online-boutique.jpg b/docs/versioned_docs/version-2.11/_media/example-online-boutique.jpg new file mode 100644 index 000000000..026f0d865 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/example-online-boutique.jpg differ diff --git a/docs/versioned_docs/version-2.11/_media/product-overview-dark.png b/docs/versioned_docs/version-2.11/_media/product-overview-dark.png new file mode 100644 index 000000000..4aab5f8bd Binary files /dev/null and 
b/docs/versioned_docs/version-2.11/_media/product-overview-dark.png differ diff --git a/docs/versioned_docs/version-2.11/_media/product-overview.png b/docs/versioned_docs/version-2.11/_media/product-overview.png new file mode 100644 index 000000000..c44e633cd Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/product-overview.png differ diff --git a/docs/versioned_docs/version-2.11/_media/recovery-gcp-serial-console-link.png b/docs/versioned_docs/version-2.11/_media/recovery-gcp-serial-console-link.png new file mode 100644 index 000000000..eb67f0e99 Binary files /dev/null and b/docs/versioned_docs/version-2.11/_media/recovery-gcp-serial-console-link.png differ diff --git a/docs/versioned_docs/version-2.11/_media/tcb.svg b/docs/versioned_docs/version-2.11/_media/tcb.svg new file mode 100644 index 000000000..f692ffd0e --- /dev/null +++ b/docs/versioned_docs/version-2.11/_media/tcb.svg @@ -0,0 +1,1287 @@ + + diff --git a/docs/versioned_docs/version-2.11/architecture/attestation.md b/docs/versioned_docs/version-2.11/architecture/attestation.md new file mode 100644 index 000000000..07ac3aa72 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/attestation.md @@ -0,0 +1,316 @@ +# Attestation + +This page explains Constellation's attestation process and highlights the cornerstones of its trust model. + +## Terms + +The following lists terms and concepts that help to understand the attestation concept of Constellation. + +### Trusted Platform Module (TPM) + +A TPM chip is a dedicated tamper-resistant crypto-processor. +It can securely store artifacts such as passwords, certificates, encryption keys, or *runtime measurements* (more on this below). +When a TPM is implemented in software, it's typically called a *virtual* TPM (vTPM). + +### Runtime measurement + +A runtime measurement is a cryptographic hash of the memory pages of a so called *runtime component*. 
Runtime components of interest typically include a system's bootloader or OS kernel. + +### Platform Configuration Register (PCR) + +A Platform Configuration Register (PCR) is a memory location in the TPM that has some unique properties. +To store a new value in a PCR, the existing value is extended with a new value as follows: + +``` +PCR[N] = HASHalg( PCR[N] || ArgumentOfExtend ) +``` + +The PCRs are typically used to store runtime measurements. +The new value of a PCR is always an extension of the existing value. +Thus, storing the measurements of multiple components into the same PCR irreversibly links them together. + +### Measured boot + +Measured boot builds on the concept of chained runtime measurements. +Each component in the boot chain loads and measures the next component into the PCR before executing it. +By comparing the resulting PCR values against trusted reference values, the integrity of the entire boot chain and thereby the running system can be ensured. + +### Remote attestation (RA) + +Remote attestation is the process of verifying certain properties of an application or platform, such as integrity and confidentiality, from a remote location. +In the case of a measured boot, the goal is to obtain a signed attestation statement on the PCR values of the boot measurements. +The statement can then be verified and compared to a set of trusted reference values. +This way, the integrity of the platform can be ensured before sharing secrets with it. + +### Confidential virtual machine (CVM) + +Confidential computing (CC) is the protection of data in-use with hardware-based trusted execution environments (TEEs). +With CVMs, TEEs encapsulate entire virtual machines and isolate them against the hypervisor, other VMs, and direct memory access. +After loading the initial VM image into encrypted memory, the hypervisor calls for a secure processor to measure these initial memory pages. 
+The secure processor locks these pages and generates an attestation report on the initial page measurements. +CVM memory pages are encrypted with a key that resides inside the secure processor, which makes sure only the guest VM can access them. +The attestation report is signed by the secure processor and can be verified using remote attestation via the certificate authority of the hardware vendor. +Such an attestation statement guarantees the confidentiality and integrity of a CVM. + +### Attested TLS (aTLS) + +In a CC environment, attested TLS (aTLS) can be used to establish secure connections between two parties using the remote attestation features of the CC components. + +aTLS modifies the TLS handshake by embedding an attestation statement into the TLS certificate. +Instead of relying on a certificate authority, aTLS uses this attestation statement to establish trust in the certificate. + +The protocol can be used by clients to verify a server certificate, by a server to verify a client certificate, or for mutual verification (mutual aTLS). + +## Overview + +The challenge for Constellation is to lift a CVM's attestation statement to the Kubernetes software layer and make it end-to-end verifiable. +From there, Constellation needs to expand the attestation from a single CVM to the entire cluster. + +The [*JoinService*](microservices.md#joinservice) and [*VerificationService*](microservices.md#verificationservice) are where all runs together. +Internally, the *JoinService* uses remote attestation to securely join CVM nodes to the cluster. +Externally, the *VerificationService* provides an attestation statement for the cluster's CVMs and configuration. + +The following explains the details of both steps. + +## Node attestation + +The idea is that Constellation nodes should have verifiable integrity from the CVM hardware measurement up to the Kubernetes software layer. +The solution is a verifiable boot chain and an integrity-protected runtime environment. 
+ +Constellation uses measured boot within CVMs, measuring each component in the boot process before executing it. +Outside of CC, it's usually implemented via TPMs. +CVM technologies differ in how they implement runtime measurements, but the general concepts are similar to those of a TPM. +For simplicity, TPM terminology like *PCR* is used in the following. + +When a Constellation node image boots inside a CVM, it uses measured boot for all stages and components of the boot chain. +This process goes up to the root filesystem. +The root filesystem is mounted read-only with integrity protection, guaranteeing forward integrity. +For the details on the image and boot stages see the [image architecture](../architecture/images.md) documentation. +Any changes to the image will inevitably also change the measured boot's PCR values. +To create a node attestation statement, the Constellation image obtains a CVM attestation statement from the hardware. +This includes the runtime measurements and thereby binds the measured boot results to the CVM hardware measurement. + +In addition to the image measurements, Constellation extends a PCR during the [initialization phase](../workflows/create.md#the-init-step) that irrevocably marks the node as initialized. +The measurement is created using the [*clusterID*](../architecture/keys.md#cluster-identity), tying all future attestation statements to this ID. +Thereby, an attestation statement is unique for every cluster and a node can be identified unambiguously as being initialized. + +To verify an attestation, the hardware's signature and a statement are verified first to establish trust in the contained runtime measurements. +If successful, the measurements are verified against the trusted values of the particular Constellation release version. +Finally, the measurement of the *clusterID* can be compared by calculating it with the [master secret](keys.md#master-secret). 
+ +### Runtime measurements + +Constellation uses runtime measurements to implement the measured boot approach. +As stated above, the underlying hardware technology and guest firmware differ in their implementations of runtime measurements. +The following gives a detailed description of the available measurements in the different cloud environments. + +The runtime measurements consist of two types of values: + +* **Measurements produced by the cloud infrastructure and firmware of the CVM**: +These are measurements of closed-source firmware and other values controlled by the cloud provider. +While not being reproducible for the user, some of them can be compared against previously observed values. +Others may change frequently and aren't suitable for verification. +The [signed image measurements](#chain-of-trust) include measurements that are known, previously observed values. + +* **Measurements produced by the Constellation bootloader and boot chain**: +The Constellation Bootloader takes over from the CVM firmware and [measures the rest of the boot chain](images.md). +The Constellation [Bootstrapper](microservices.md#bootstrapper) is the first user mode component that runs in a Constellation image. +It extends PCR registers with the [IDs](keys.md#cluster-identity) of the cluster marking a node as initialized. + +Constellation allows to specify in the config which measurements should be enforced during the attestation process. +Enforcing non-reproducible measurements controlled by the cloud provider means that changes in these values require manual updates to the cluster's config. +By default, Constellation only enforces measurements that are stable values produced by the infrastructure or by Constellation directly. + + + + +Constellation uses the [vTPM](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch#vtpm) feature of Azure CVMs for runtime measurements. 
+This vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. +It provides a [measured boot](https://docs.microsoft.com/en-us/azure/security/fundamentals/measured-boot-host-attestation#measured-boot) verification that's based on the trusted launch feature of [Trusted Launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch). + +The following table lists all PCR values of the vTPM and the measured components. +It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. +The latter means that the value can be generated offline and compared to the one in the vTPM. + +| PCR | Components | Measured by | Reproducible and verifiable | +| ----------- | ---------------------------------------------------------------- | ------------------------------- | --------------------------- | +| 0 | Firmware | Azure | No | +| 1 | Firmware | Azure | No | +| 2 | Firmware | Azure | No | +| 3 | Firmware | Azure | No | +| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | Azure, Constellation Bootloader | Yes | +| 5 | Reserved | Azure | No | +| 6 | VM Unique ID | Azure | No | +| 7 | Secure Boot State | Azure, Constellation Bootloader | No | +| 8 | - | - | - | +| 9 | initramfs | Linux Kernel | Yes | +| 10 | User space | Linux IMA | No[^1] | +| 11 | Reserved for Unified Kernel Image components | (Constellation Bootloader) | Yes | +| 12 | Kernel command line | Constellation Bootloader | Yes | +| 13 | Reserved | (Constellation Bootloader) | Yes | +| 14 | Secure Boot State | Constellation Bootloader | No | +| 15 | ClusterID | Constellation Bootstrapper | Yes | +| 16–23 | Unused | - | - | + + + + +Constellation uses the [vTPM](https://cloud.google.com/compute/confidential-vm/docs/about-cvm) feature of CVMs on GCP for runtime measurements. 
+Note that this vTPM doesn't run inside the hardware-protected CVM context, but is emulated by the hypervisor. + +The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. +It provides a [launch attestation report](https://cloud.google.com/compute/confidential-vm/docs/monitoring#about_launch_attestation_report_events) that's based on the measured boot feature of [Shielded VMs](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#measured-boot). + +The following table lists all PCR values of the vTPM and the measured components. +It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. +The latter means that the value can be generated offline and compared to the one in the vTPM. + +| PCR | Components | Measured by | Reproducible and verifiable | +| ----------- | ---------------------------------------------------------------- | ----------------------------- | --------------------------- | +| 0 | CVM version and technology | GCP | No | +| 1 | Firmware | GCP | No | +| 2 | Firmware | GCP | No | +| 3 | Firmware | GCP | No | +| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | GCP, Constellation Bootloader | Yes | +| 5 | Disk GUID partition table | GCP | No | +| 6 | Disk GUID partition table | GCP | No | +| 7 | GCP Secure Boot Policy | GCP, Constellation Bootloader | No | +| 8 | - | - | - | +| 9 | initramfs | Linux Kernel | Yes | +| 10 | User space | Linux IMA | No[^1] | +| 11 | Reserved for Unified Kernel Image components | (Constellation Bootloader) | Yes | +| 12 | Kernel command line | Constellation Bootloader | Yes | +| 13 | Reserved | (Constellation Bootloader) | Yes | +| 14 | Secure Boot State | Constellation Bootloader | No | +| 15 | ClusterID | Constellation Bootstrapper | Yes | +| 16–23 | Unused | - | - | + + + + +Constellation uses the [vTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) 
(NitroTPM) feature of the [AWS Nitro System](http://aws.amazon.com/ec2/nitro/) on AWS for runtime measurements. + +The vTPM adheres to the [TPM 2.0](https://trustedcomputinggroup.org/resource/tpm-library-specification/) specification. +The VMs are attested by obtaining signed PCR values over the VM's boot configuration from the TPM and comparing them to a known, good state (measured boot). + +The following table lists all PCR values of the vTPM and the measured components. +It also lists what components of the boot chain did the measurements and if the value is reproducible and verifiable. +The latter means that the value can be generated offline and compared to the one in the vTPM. + +| PCR | Components | Measured by | Reproducible and verifiable | +| ----------- | ---------------------------------------------------------------- | ----------------------------- | --------------------------- | +| 0 | Firmware | AWS | No | +| 1 | Firmware | AWS | No | +| 2 | Firmware | AWS | No | +| 3 | Firmware | AWS | No | +| 4 | Constellation Bootloader, Kernel, initramfs, Kernel command line | AWS, Constellation Bootloader | Yes | +| 5 | Firmware | AWS | No | +| 6 | Firmware | AWS | No | +| 7 | Secure Boot Policy | AWS, Constellation Bootloader | No | +| 8 | - | - | - | +| 9 | initramfs | Linux Kernel | Yes | +| 10 | User space | Linux IMA | No[^1] | +| 11 | Reserved for Unified Kernel Image components | (Constellation Bootloader) | Yes | +| 12 | Kernel command line | Constellation Bootloader | Yes | +| 13 | Reserved | (Constellation Bootloader) | Yes | +| 14 | Secure Boot State | Constellation Bootloader | No | +| 15 | ClusterID | Constellation Bootstrapper | Yes | +| 16–23 | Unused | - | - | + + + + +### CVM verification + +To verify the integrity of the received attestation statement, a chain of trust from the CVM technology to the interface providing the statement has to be established. 
+For verification of the CVM technology, Constellation may expose additional options in its config file. + + + + +On Azure, AMD SEV-SNP is used to provide runtime encryption to the VMs. +An SEV-SNP attestation report is used to establish trust in the vTPM running inside the VM. +You may customize certain parameters for verification of the attestation statement using the Constellation config file. + +* TCB versions + + You can set the minimum version numbers of components in the SEV-SNP TCB. + Use the latest versions to enforce that only machines with the most recent firmware updates are allowed to join the cluster. + Alternatively, you can set a lower minimum version to allow slightly out-of-date machines to still be able to join the cluster. + +* AMD Root Key Certificate + + This certificate is the root of trust for verifying the SEV-SNP certificate chain. + +* Firmware Signer + + This config option allows you to specify how the firmware signer should be verified. + More explicitly, it controls the verification of the `IDKeyDigest` value in the SEV-SNP attestation report. + You can provide a list of accepted key digests and specify a policy on how this list is compared against the reported `IDKeyDigest`. + + + + +There is no additional configuration available for GCP. + + + + +There is no additional configuration available for AWS. + + + + +## Cluster attestation + +Cluster-facing, Constellation's [*JoinService*](microservices.md#joinservice) verifies each node joining the cluster given the configured ground truth runtime measurements. +User-facing, the [*VerificationService*](microservices.md#verificationservice) provides an interface to verify a node using remote attestation. +By verifying the first node during the [initialization](microservices.md#bootstrapper) and configuring the ground truth measurements that are subsequently enforced by the *JoinService*, the whole cluster is verified in a transitive way. 
+ +### Cluster-facing attestation + +The *JoinService* is provided with the runtime measurements of the whitelisted Constellation image version as the ground truth. +During the initialization and the cluster bootstrapping, each node connects to the *JoinService* using [aTLS](#attested-tls-atls). +During the handshake, the node transmits an attestation statement including its runtime measurements. +The *JoinService* verifies that statement and compares the measurements against the ground truth. +For details of the initialization process check the [microservice descriptions](microservices.md). + +After the initialization, every node updates its runtime measurements with the *clusterID* value, marking it irreversibly as initialized. +When an initialized node tries to join another cluster, its measurements inevitably mismatch the measurements of an uninitialized node and it will be declined. + +### User-facing attestation + +The [*VerificationService*](microservices.md#verificationservice) provides an endpoint for obtaining its hardware-based remote attestation statement, which includes the runtime measurements. +A user can [verify](../workflows/verify-cluster.md) this statement and compare the measurements against the configured ground truth and, thus, verify the identity and integrity of all Constellation components and the cluster configuration. Subsequently, the user knows that the entire cluster is in the expected state and is trustworthy. + +## Chain of trust + +So far, this page described how an entire Constellation cluster can be verified using hardware attestation capabilities and runtime measurements. +The last missing link is how the ground truth in the form of runtime measurements can be securely distributed to the verifying party. + +The build process of Constellation images also creates the ground truth runtime measurements. +With every release, Edgeless Systems publishes signed runtime measurements. 
+ +The CLI executable is also signed by Edgeless Systems. +You can [verify its signature](../workflows/verify-cli.md). + +The CLI contains the public key required to verify signed runtime measurements from Edgeless Systems. +When a cluster is [created](../workflows/create.md) or [upgraded](../workflows/upgrade.md), the CLI automatically verifies the measurements for the selected image. + +Thus, there's a chain of trust based on cryptographic signatures, which goes from CLI to runtime measurements to images. This is illustrated in the following diagram. + +```mermaid +flowchart LR + A[Edgeless]-- "signs (cosign)" -->B[CLI] + C[User]-- "verifies (cosign)" -->B[CLI] + B[CLI]-- "contains" -->D["Public Key"] + A[Edgeless]-- "signs" -->E["Runtime measurements"] + D["Public key"]-- "verifies" -->E["Runtime measurements"] + E["Runtime measurements"]-- "verify" -->F["Constellation cluster"] +``` + +## References + +[^1]: Linux IMA produces runtime measurements of user-space binaries. +However, these measurements aren't deterministic and thus, PCR\[10] can't be compared to a constant value. +Instead, a policy engine must be used to verify the TPM event log against a policy. diff --git a/docs/versioned_docs/version-2.11/architecture/encrypted-storage.md b/docs/versioned_docs/version-2.11/architecture/encrypted-storage.md new file mode 100644 index 000000000..4288ddac2 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/encrypted-storage.md @@ -0,0 +1,57 @@ +# Encrypted persistent storage + +Confidential VMs provide runtime memory encryption to protect data in use. +In the context of Kubernetes, this is sufficient for the confidentiality and integrity of stateless services. +Consider a front-end web server, for example, that keeps all connection information cached in main memory. +No sensitive data is ever written to an insecure medium. 
+However, many real-world applications need some form of state or data-lake service that's connected to a persistent storage device and requires encryption at rest. +As described in [Use persistent storage](../workflows/storage.md), cloud service providers (CSPs) use the container storage interface (CSI) to make their storage solutions available to Kubernetes workloads. +These CSI storage solutions often support some sort of encryption. +For example, Google Cloud [encrypts data at rest by default](https://cloud.google.com/security/encryption/default-encryption), without any action required by the customer. + +## Cloud provider-managed encryption + +CSP-managed storage solutions encrypt the data in the cloud backend before writing it physically to disk. +In the context of confidential computing and Constellation, the CSP and its managed services aren't trusted. +Hence, cloud provider-managed encryption protects your data from offline hardware access to physical storage devices. +It doesn't protect it from anyone with infrastructure-level access to the storage backend or a malicious insider in the cloud platform. +Even with "bring your own key" or similar concepts, the CSP performs the encryption process with access to the keys and plaintext data. + +In the security model of Constellation, securing persistent storage and thereby data at rest requires that all cryptographic operations are performed inside a trusted execution environment. +Consequently, using CSP-managed encryption of persistent storage usually isn't an option. + +## Constellation-managed encryption + +Constellation provides CSI drivers for storage solutions in all major clouds with built-in encryption support. 
+Block storage provisioned by the CSP is [mapped](https://guix.gnu.org/manual/en/html_node/Mapped-Devices.html) using the [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html), and optionally the [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html), kernel modules, before it's formatted and accessed by the Kubernetes workloads. +All cryptographic operations happen inside the trusted environment of the confidential Constellation node. + +Note that for integrity-protected disks, [volume expansion](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) isn't supported. + +By default the driver uses data encryption keys (DEKs) issued by the Constellation [*KeyService*](microservices.md#keyservice). +The DEKs are in turn derived from the Constellation's key encryption key (KEK), which is directly derived from the [master secret](keys.md#master-secret). +This is the recommended mode of operation, and also requires the least amount of setup by the cluster administrator. + +Alternatively, the driver can be configured to use a key management system to store and access KEKs and DEKs. + +Refer to [keys and cryptography](keys.md) for more details on key management in Constellation. + +Once deployed and configured, the CSI driver ensures transparent encryption and integrity of all persistent volumes provisioned via its storage class. +Data at rest is secured without any additional actions required by the developer. + +## Cryptographic algorithms + +This section gives an overview of the libraries, cryptographic algorithms, and their configurations, used in Constellation's CSI drivers. + +### dm-crypt + +To interact with the dm-crypt kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). +New devices are formatted as [LUKS2](https://gitlab.com/cryptsetup/LUKS2-docs/-/tree/master) partitions with a sector size of 4096 bytes. 
+The used key derivation function is [Argon2id](https://datatracker.ietf.org/doc/html/rfc9106) with the [recommended parameters for memory-constrained environments](https://datatracker.ietf.org/doc/html/rfc9106#section-7.4) of 3 iterations and 64 MiB of memory, utilizing 4 parallel threads. +For encryption Constellation uses AES in XTS-Plain64. The key size is 512 bit. + +### dm-integrity + +To interact with the dm-integrity kernel module, Constellation uses [libcryptsetup](https://gitlab.com/cryptsetup/cryptsetup/). +When enabled, the used data integrity algorithm is [HMAC](https://datatracker.ietf.org/doc/html/rfc2104) with SHA256 as the hash function. +The tag size is 32 Bytes. diff --git a/docs/versioned_docs/version-2.11/architecture/images.md b/docs/versioned_docs/version-2.11/architecture/images.md new file mode 100644 index 000000000..8a9c51d36 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/images.md @@ -0,0 +1,49 @@ +# Constellation images + +Constellation uses a minimal version of Fedora as the operating system running inside confidential VMs. This Linux distribution is optimized for containers and designed to be stateless. +The Constellation images provide measured boot and an immutable filesystem. + +## Measured boot + +```mermaid +flowchart LR + Firmware --> Bootloader + Bootloader --> uki + subgraph uki[Unified Kernel Image] + Kernel[Kernel] + initramfs[Initramfs] + cmdline[Kernel Command Line] + end + uki --> rootfs[Root Filesystem] +``` + +Measured boot uses a Trusted Platform Module (TPM) to measure every part of the boot process. This allows for verification of the integrity of a running system at any point in time. To ensure correct measurements of every stage, each stage is responsible to measure the next stage before transitioning. + +### Firmware + +With confidential VMs, the firmware is the root of trust and is measured automatically at boot. 
After initialization, the firmware will load and measure the bootloader before executing it. + +### Bootloader + +The bootloader is the first modifiable part of the boot chain. The bootloader is tasked with loading the kernel, initramfs and setting the kernel command line. The Constellation bootloader measures these components before starting the kernel. + +### initramfs + +The initramfs is a small filesystem loaded to prepare the actual root filesystem. The Constellation initramfs maps the block device containing the root filesystem with [dm-verity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/verity.html). The initramfs then mounts the root filesystem from the mapped block device. + +dm-verity provides integrity checking using a cryptographic hash tree. When a block is read, its integrity is checked by verifying the tree against a trusted root hash. The initramfs reads this root hash from the previously measured kernel command line. Thus, if any block of the root filesystem's device is modified on disk, trying to read the modified block will result in a kernel panic at runtime. + +After mounting the root filesystem, the initramfs will switch over and start the `init` process of the integrity-protected root filesystem. + +## State disk + +In addition to the read-only root filesystem, each Constellation node has a disk for storing state data. +This disk is mounted readable and writable by the initramfs and contains data that should persist across reboots. +Such data can contain sensitive information and, therefore, must be stored securely. +To that end, the state disk is protected by authenticated encryption. +See the section on [keys and encryption](keys.md#storage-encryption) for more information on the cryptographic primitives in use. 
+
+## Kubernetes components
+
+During initialization, the [*Bootstrapper*](microservices.md#bootstrapper) downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) as configured by the user.
+They're stored on the state partition and can be updated once new releases need to be installed. diff --git a/docs/versioned_docs/version-2.11/architecture/keys.md b/docs/versioned_docs/version-2.11/architecture/keys.md new file mode 100644 index 000000000..f2c8c3fba --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/keys.md @@ -0,0 +1,131 @@ +# Key management and cryptographic primitives
+
+Constellation protects and isolates your cluster and workloads.
+To that end, cryptography is the foundation that ensures the confidentiality and integrity of all components.
+Evaluating the security and compliance of Constellation requires a precise understanding of the cryptographic primitives and keys used.
+The following gives an overview of the architecture and explains the technical details.
+
+## Confidential VMs
+
+Confidential VM (CVM) technology comes with hardware and software components for memory encryption, isolation, and remote attestation.
+For details on the implementations and cryptographic soundness, refer to the hardware vendors' documentation and advisories.
+
+## Master secret
+
+The master secret is the cryptographic material used for deriving the [*clusterID*](#cluster-identity) and the *key encryption key (KEK)* for [storage encryption](#storage-encryption).
+It's generated during the bootstrapping of a Constellation cluster.
+It can either be managed by [Constellation](#constellation-managed-key-management) or an [external key management system](#user-managed-key-management).
+In case of [recovery](#recovery-and-migration), the master secret allows you to decrypt the state and recover a Constellation cluster.
+ +## Cluster identity + +The identity of a Constellation cluster is represented by cryptographic [measurements](attestation.md#runtime-measurements): + +The **base measurements** represent the identity of a valid, uninitialized Constellation node. +They depend on the node image, but are otherwise the same for every Constellation cluster. +On node boot, they're determined using the CVM's attestation mechanism and [measured boot up to the read-only root filesystem](images.md). + +The **clusterID** represents the identity of a single initialized Constellation cluster. +It's derived from the master secret and a cryptographically random salt and unique for every Constellation cluster. +The [Bootstrapper](microservices.md#bootstrapper) measures the *clusterID* into its own PCR before executing any code not measured as part of the *base measurements*. +See [Node attestation](attestation.md#node-attestation) for details. + +The remote attestation statement of a Constellation cluster combines the *base measurements* and the *clusterID* for a verifiable, unspoofable, unique identity. + +## Network encryption + +Constellation encrypts all cluster network communication using the [container network interface (CNI)](https://github.com/containernetworking/cni). +See [network encryption](networking.md) for more details. + +The Cilium agent running on each node establishes a secure [WireGuard](https://www.wireguard.com/) tunnel between it and all other known nodes in the cluster. +Each node creates its own [Curve25519](http://cr.yp.to/ecdh.html) encryption key pair and distributes its public key via Kubernetes. +A node uses another node's public key to decrypt and encrypt traffic from and to Cilium-managed endpoints running on that node. +Connections are always encrypted peer-to-peer using [ChaCha20](http://cr.yp.to/chacha.html) with [Poly1305](http://cr.yp.to/mac.html). 
+WireGuard implements [forward secrecy with key rotation every 2 minutes](https://lists.zx2c4.com/pipermail/wireguard/2017-December/002141.html).
+Cilium supports [key rotation](https://docs.cilium.io/en/stable/security/network/encryption-ipsec/#key-rotation) for the long-term node keys via Kubernetes secrets.
+
+## Storage encryption
+
+Constellation supports transparent encryption of persistent storage.
+The Linux kernel's device mapper-based encryption features are used to encrypt the data on the block storage level.
+Currently, the following primitives are used for block storage encryption:
+
+* [dm-crypt](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-crypt.html)
+* [dm-integrity](https://www.kernel.org/doc/html/latest/admin-guide/device-mapper/dm-integrity.html)
+
+Adding primitives for integrity protection in the CVM attacker model is under active development and will be available in a future version of Constellation.
+See [encrypted storage](encrypted-storage.md) for more details.
+
+As a cluster administrator, when creating a cluster, you can use the Constellation [installation program](orchestration.md) to select one of the following methods for key management:
+
+* Constellation-managed key management
+* User-managed key management
+
+### Constellation-managed key management
+
+#### Key material and key derivation
+
+During the creation of a Constellation cluster, the cluster's master secret is used to derive a KEK.
+This means creating two clusters with the same master secret will yield the same KEK.
+Any data encryption key (DEK) is derived from the KEK via HKDF.
+Note that the master secret is recommended to be unique for every cluster and shouldn't be reused (except in case of [recovering](../workflows/recovery.md) a cluster).
+
+#### State and storage
+
+The KEK is derived from the master secret during the initialization.
+Subsequently, all other key material is derived from the KEK.
+Given the same KEK, any DEK can be derived deterministically from a given identifier.
+Hence, there is no need to store DEKs. They can be derived on demand.
+After the KEK was derived, it's stored in memory only and never leaves the CVM context.
+
+#### Availability
+
+Constellation-managed key management has the same availability as the underlying Kubernetes cluster.
+Therefore, the KEK is stored in the [distributed Kubernetes etcd storage](https://kubernetes.io/docs/tasks/administer-cluster/configure-upgrade-etcd/) to allow for unexpected but non-fatal (control-plane) node failure.
+The etcd storage is backed by the encrypted and integrity protected [state disk](images.md#state-disk) of the nodes.
+
+#### Recovery
+
+Constellation clusters can be recovered in the event of a disaster, even when all node machines have been stopped and need to be rebooted.
+For details on the process see the [recovery workflow](../workflows/recovery.md).
+
+### User-managed key management
+
+User-managed key management is under active development and will be available soon.
+In scenarios where Constellation-managed key management isn't an option, this mode allows you to keep full control of your keys.
+For example, compliance requirements may force you to keep your KEKs in an on-prem key management system (KMS).
+
+During the creation of a Constellation cluster, you specify a KEK present in a remote KMS.
+This follows the common scheme of "bring your own key" (BYOK).
+Constellation will support several KMSs for managing the storage and access of your KEK.
+Initially, it will support the following KMSs: + +* [AWS KMS](https://aws.amazon.com/kms/) +* [GCP KMS](https://cloud.google.com/security-key-management) +* [Azure Key Vault](https://azure.microsoft.com/en-us/services/key-vault/#product-overview) +* [KMIP-compatible KMS](https://www.oasis-open.org/committees/tc_home.php?wg_abbrev=kmip) + +Storing the keys in Cloud KMS of AWS, GCP, or Azure binds the key usage to the particular cloud identity access management (IAM). +In the future, Constellation will support remote attestation-based access policies for Cloud KMS once available. +Note that using a Cloud KMS limits the isolation and protection to the guarantees of the particular offering. + +KMIP support allows you to use your KMIP-compatible on-prem KMS and keep full control over your keys. +This follows the common scheme of "hold your own key" (HYOK). + +The KEK is used to encrypt per-data "data encryption keys" (DEKs). +DEKs are generated to encrypt your data before storing it on persistent storage. +After being encrypted by the KEK, the DEKs are stored on dedicated cloud storage for persistence. +Currently, Constellation supports the following cloud storage options: + +* [AWS S3](https://aws.amazon.com/s3/) +* [GCP Cloud Storage](https://cloud.google.com/storage) +* [Azure Blob Storage](https://azure.microsoft.com/en-us/services/storage/blobs/#overview) + +The DEKs are only present in plaintext form in the encrypted main memory of the CVMs. +Similarly, the cryptographic operations for encrypting data before writing it to persistent storage are performed in the context of the CVMs. + +#### Recovery and migration + +In the case of a disaster, the KEK can be used to decrypt the DEKs locally and subsequently use them to decrypt and retrieve the data. +In case of migration, configuring the same KEK will provide seamless migration of data. +Thus, only the DEK storage needs to be transferred to the new cluster alongside the encrypted data for seamless migration. 
diff --git a/docs/versioned_docs/version-2.11/architecture/microservices.md b/docs/versioned_docs/version-2.11/architecture/microservices.md new file mode 100644 index 000000000..90bae783b --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/microservices.md @@ -0,0 +1,73 @@ +# Microservices + +Constellation takes care of bootstrapping and initializing a Confidential Kubernetes cluster. +During the lifetime of the cluster, it handles day 2 operations such as key management, remote attestation, and updates. +These features are provided by several microservices: + +* The [Bootstrapper](microservices.md#bootstrapper) initializes a Constellation node and bootstraps the cluster +* The [JoinService](microservices.md#joinservice) joins new nodes to an existing cluster +* The [VerificationService](microservices.md#verificationservice) provides remote attestation functionality +* The [KeyService](microservices.md#keyservice) manages Constellation-internal keys + +The relations between microservices are shown in the following diagram: + +```mermaid +flowchart LR + subgraph admin [Admin's machine] + A[Constellation CLI] + end + subgraph img [Constellation OS image] + B[Constellation OS] + C[Bootstrapper] + end + subgraph Kubernetes + D[JoinService] + E[KeyService] + F[VerificationService] + end + A -- deploys --> + B -- starts --> C + C -- deploys --> D + C -- deploys --> E + C -- deploys --> F +``` + +## Bootstrapper + +The *Bootstrapper* is the first microservice launched after booting a Constellation node image. +It sets up that machine as a Kubernetes node and integrates that node into the Kubernetes cluster. +To this end, the *Bootstrapper* first downloads and verifies the [Kubernetes components](https://kubernetes.io/docs/concepts/overview/components/) at the configured versions. +The *Bootstrapper* tries to find an existing cluster and if successful, communicates with the [JoinService](microservices.md#joinservice) to join the node. 
+Otherwise, it waits for an initialization request to create a new Kubernetes cluster. + +## JoinService + +The *JoinService* runs as [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) on each control-plane node. +New nodes (at cluster start, or later through autoscaling) send a request to the service over [attested TLS (aTLS)](attestation.md#attested-tls-atls). +The *JoinService* verifies the new node's certificate and attestation statement. +If attestation is successful, the new node is supplied with an encryption key from the [*KeyService*](microservices.md#keyservice) for its state disk, and a Kubernetes bootstrap token. + + +```mermaid +sequenceDiagram + participant New node + participant JoinService + New node->>JoinService: aTLS handshake (server side verification) + JoinService-->>New node: # + New node->>+JoinService: IssueJoinTicket(DiskUUID, NodeName, IsControlPlane) + JoinService->>+KeyService: GetDataKey(DiskUUID) + KeyService-->>-JoinService: DiskEncryptionKey + JoinService-->>-New node: DiskEncryptionKey, KubernetesJoinToken, ... +``` + +## VerificationService + +The *VerificationService* runs as DaemonSet on each node. +It provides user-facing functionality for remote attestation during the cluster's lifetime via an endpoint for [verifying the cluster](attestation.md#cluster-attestation). +Read more about the hardware-based [attestation feature](attestation.md) of Constellation and how to [verify](../workflows/verify-cluster.md) a cluster on the client side. + +## KeyService + +The *KeyService* runs as DaemonSet on each control-plane node. +It implements the key management for the [storage encryption keys](keys.md#storage-encryption) in Constellation. These keys are used for the [state disk](images.md#state-disk) of each node and the [transparently encrypted storage](encrypted-storage.md) for Kubernetes. 
+Depending on whether the [constellation-managed](keys.md#constellation-managed-key-management) or [user-managed](keys.md#user-managed-key-management) mode is used, the *KeyService* holds the key encryption key (KEK) directly or calls an external key management service (KMS) for key derivation respectively. diff --git a/docs/versioned_docs/version-2.11/architecture/networking.md b/docs/versioned_docs/version-2.11/architecture/networking.md new file mode 100644 index 000000000..e9cbdf029 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/networking.md @@ -0,0 +1,22 @@ +# Network encryption
+
+Constellation encrypts all pod communication using the [container network interface (CNI)](https://github.com/containernetworking/cni).
+To that end, Constellation deploys, configures, and operates the [Cilium](https://cilium.io/) CNI plugin.
+Cilium provides [transparent encryption](https://docs.cilium.io/en/stable/security/network/encryption) for all cluster traffic using either IPSec or [WireGuard](https://www.wireguard.com/).
+Currently, Constellation only supports WireGuard as the encryption engine.
+You can read more about the cryptographic soundness of WireGuard [in their white paper](https://www.wireguard.com/papers/wireguard.pdf).
+
+Cilium is actively working on implementing a feature called [`host-to-host`](https://github.com/cilium/cilium/pull/19401) encryption mode for WireGuard.
+With `host-to-host`, all traffic between nodes will be tunneled via WireGuard (host-to-host, host-to-pod, pod-to-host, pod-to-pod).
+Until the `host-to-host` feature is released, Constellation enables `pod-to-pod` encryption.
+This mode encrypts all traffic between Kubernetes pods using WireGuard tunnels.
+
+When using Cilium in the default setup but with encryption enabled, there is a [known issue](https://docs.cilium.io/en/v1.12/gettingstarted/encryption/#egress-traffic-to-not-yet-discovered-remote-endpoints-may-be-unencrypted)
+that can cause pod-to-pod traffic to be unencrypted.
+To mitigate this issue, Constellation adds a *strict* mode to Cilium's `pod-to-pod` encryption.
+This mode changes the default behavior of traffic that's destined for an unknown endpoint to not be sent out in plaintext, but instead to be dropped.
+The strict mode distinguishes traffic that's sent to a pod from traffic that's destined for a cluster-external endpoint by considering the pod's CIDR range.
+
+Traffic originating from hosts isn't encrypted yet.
+This mainly includes health checks from the Kubernetes API server.
+Also, traffic proxied over the API server via e.g. `kubectl port-forward` isn't encrypted. diff --git a/docs/versioned_docs/version-2.11/architecture/orchestration.md b/docs/versioned_docs/version-2.11/architecture/orchestration.md new file mode 100644 index 000000000..2ce4edc73 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/orchestration.md @@ -0,0 +1,78 @@ +# Orchestrating Constellation clusters
+
+You can use the CLI to create a cluster on the supported cloud platforms.
+The CLI provisions the resources in your cloud environment and initiates the initialization of your cluster.
+It uses a set of parameters and an optional configuration file to manage your cluster installation.
+The CLI is also used for updating your cluster.
+
+## Workspaces
+
+Each Constellation cluster has an associated *workspace*.
+The workspace is where data such as the Constellation state, config, and ID files are stored.
+Each workspace is associated with a single cluster and configuration.
+The CLI stores state in the local filesystem making the current directory the active workspace.
+Multiple clusters require multiple workspaces, hence, multiple directories.
+Note that every operation on a cluster always has to be performed from the directory associated with its workspace. + +## Cluster creation process + +To allow for fine-grained configuration of your cluster and cloud environment, Constellation supports an extensive configuration file with strong defaults. [Generating the configuration file](../workflows/config.md) is typically the first thing you do in the workspace. + +Altogether, the following files are generated during the creation of a Constellation cluster and stored in the current workspace: + +* a configuration file +* an ID file +* a Base64-encoded master secret +* [Terraform artifacts](../reference/terraform.md), stored in subdirectories +* a Kubernetes `kubeconfig` file. + +After the creation of your cluster, the CLI will provide you with a Kubernetes `kubeconfig` file. +This file grants you access to your Kubernetes cluster and configures the [kubectl](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) tool. +In addition, the cluster's [identifier](orchestration.md#post-installation-configuration) is returned and stored in a file called `constellation-id.json` + +### Creation process details + +1. The CLI `create` command creates the confidential VM (CVM) resources in your cloud environment and configures the network +2. Each CVM boots the Constellation node image and measures every component in the boot chain +3. The first microservice launched in each node is the [*Bootstrapper*](microservices.md#bootstrapper) +4. The *Bootstrapper* waits until it either receives an initialization request or discovers an initialized cluster +5. The CLI `init` command connects to the *Bootstrapper* of a selected node, sends the configuration, and initiates the initialization of the cluster +6. 
The *Bootstrapper* of **that** node [initializes the Kubernetes cluster](microservices.md#bootstrapper) and deploys the other Constellation [microservices](microservices.md) including the [*JoinService*](microservices.md#joinservice) +7. Subsequently, the *Bootstrappers* of the other nodes discover the initialized cluster and send join requests to the *JoinService* +8. As part of the join request each node includes an attestation statement of its boot measurements as authentication +9. The *JoinService* verifies the attestation statements and joins the nodes to the Kubernetes cluster +10. This process is repeated for every node joining the cluster later (e.g., through autoscaling) + +## Post-installation configuration + +Post-installation the CLI provides a configuration for [accessing the cluster using the Kubernetes API](https://kubernetes.io/docs/tasks/administer-cluster/access-cluster-api/). +The `kubeconfig` file provides the credentials and configuration for connecting and authenticating to the API server. +Once configured, orchestrate the Kubernetes cluster via `kubectl`. + +After the initialization, the CLI will present you with a couple of tokens: + +* The [*master secret*](keys.md#master-secret) (stored in the `constellation-mastersecret.json` file by default) +* The [*clusterID*](keys.md#cluster-identity) of your cluster in Base64 encoding + +You can read more about these values and their meaning in the guide on [cluster identity](keys.md#cluster-identity). + +The *master secret* must be kept secret and can be used to [recover your cluster](../workflows/recovery.md). +Instead of managing this secret manually, you can [use your key management solution of choice](keys.md#user-managed-key-management) with Constellation. + +The *clusterID* uniquely identifies a cluster and can be used to [verify your cluster](../workflows/verify-cluster.md). 
+ +## Upgrades + +Constellation images and microservices may need to be upgraded to new versions during the lifetime of a cluster. +Constellation implements a rolling update mechanism ensuring no downtime of the control or data plane. +You can upgrade a Constellation cluster with a single operation by using the CLI. +For step-by-step instructions on how to do this, refer to [Upgrade your cluster](../workflows/upgrade.md). + +### Attestation of upgrades + +With every new image, corresponding measurements are released. +During an update procedure, the CLI provides new measurements to the [JoinService](microservices.md#joinservice) securely. +New measurements for an updated image are automatically pulled and verified by the CLI following the [supply chain security concept](attestation.md#chain-of-trust) of Constellation. +The [attestation section](attestation.md#cluster-facing-attestation) describes in detail how these measurements are then used by the JoinService for the attestation of nodes. + + diff --git a/docs/versioned_docs/version-2.11/architecture/overview.md b/docs/versioned_docs/version-2.11/architecture/overview.md new file mode 100644 index 000000000..324eb99bb --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/overview.md @@ -0,0 +1,24 @@ +# Overview + +Constellation is a cloud-based confidential orchestration platform. +The foundation of Constellation is Kubernetes and therefore shares the same technology stack and architecture principles. +To learn more about Constellation and Kubernetes, see [product overview](../overview/product.md). + +## About orchestration and updates + +As a cluster administrator, you can use the [Constellation CLI](orchestration.md) to install and deploy a cluster. +Updates are provided in accordance with the [support policy](versions.md). + +## About microservices and attestation + +Constellation manages the nodes and network in your cluster. 
All nodes are bootstrapped by the [*Bootstrapper*](microservices.md#bootstrapper). They're verified and authenticated by the [*JoinService*](microservices.md#joinservice) before being added to the cluster and the network. Finally, the entire cluster can be verified via the [*VerificationService*](microservices.md#verificationservice) using [remote attestation](attestation.md).
+
+## About node images and verified boot
+
+Constellation comes with operating system images for Kubernetes control-plane and worker nodes.
+They're highly optimized for running containerized workloads and specifically prepared for running inside confidential VMs.
+You can learn more about [the images](images.md) and how verified boot ensures their integrity during boot and beyond.
+
+## About key management and cryptographic primitives
+
+Encryption of data at-rest, in-transit, and in-use is the fundamental building block for confidential computing and Constellation. Learn more about the [keys and cryptographic primitives](keys.md) used in Constellation, [encrypted persistent storage](encrypted-storage.md), and [network encryption](networking.md). diff --git a/docs/versioned_docs/version-2.11/architecture/versions.md b/docs/versioned_docs/version-2.11/architecture/versions.md new file mode 100644 index 000000000..8ebfc7e32 --- /dev/null +++ b/docs/versioned_docs/version-2.11/architecture/versions.md @@ -0,0 +1,14 @@ +# Versions and support policy
+
+All components of Constellation use a three-digit version number of the form `v<MAJOR>.<MINOR>.<PATCH>`.
+The components are released in lock step, usually on the first Tuesday of every month. This release primarily introduces new features, but may also include security or performance improvements. The `MINOR` version will be incremented as part of this release.
+
+Additional `PATCH` releases may be created on demand, to fix security issues or bugs before the next `MINOR` release window.
+ +New releases are published on [GitHub](https://github.com/edgelesssys/constellation/releases). + +## Kubernetes support policy + +Constellation is aligned to the [version support policy of Kubernetes](https://kubernetes.io/releases/version-skew-policy/#supported-versions), and therefore usually supports the most recent three minor versions. +When a new minor version of Kubernetes is released, support is added to the next Constellation release, and that version then supports four Kubernetes versions. +Subsequent Constellation releases drop support for the oldest (and deprecated) Kubernetes version. diff --git a/docs/versioned_docs/version-2.11/getting-started/examples.md b/docs/versioned_docs/version-2.11/getting-started/examples.md new file mode 100644 index 000000000..fded84980 --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/examples.md @@ -0,0 +1,6 @@ +# Examples + +After you [installed the CLI](install.md) and [created your first cluster](first-steps.md), you're ready to deploy applications. Why not start with one of the following examples? +* [Emojivoto](examples/emojivoto.md): a simple but fun web application +* [Online Boutique](examples/online-boutique.md): an e-commerce demo application by Google consisting of 11 separate microservices +* [Horizontal Pod Autoscaling](examples/horizontal-scaling.md): an example demonstrating Constellation's autoscaling capabilities diff --git a/docs/versioned_docs/version-2.11/getting-started/examples/emojivoto.md b/docs/versioned_docs/version-2.11/getting-started/examples/emojivoto.md new file mode 100644 index 000000000..2bbe27917 --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/examples/emojivoto.md @@ -0,0 +1,22 @@ +# Emojivoto +[Emojivoto](https://github.com/BuoyantIO/emojivoto) is a simple and fun application that's well suited to test the basic functionality of your cluster. + + + +emojivoto - Web UI + + + +1. 
Deploy the application: + ```bash + kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment + ``` +2. Wait until it becomes available: + ```bash + kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments + ``` +3. Forward the web service to your machine: + ```bash + kubectl -n emojivoto port-forward svc/web-svc 8080:80 + ``` +4. Visit [http://localhost:8080](http://localhost:8080) diff --git a/docs/versioned_docs/version-2.11/getting-started/examples/horizontal-scaling.md b/docs/versioned_docs/version-2.11/getting-started/examples/horizontal-scaling.md new file mode 100644 index 000000000..dfaf9e742 --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/examples/horizontal-scaling.md @@ -0,0 +1,98 @@ +# Horizontal Pod Autoscaling +This example demonstrates Constellation's autoscaling capabilities. It's based on the Kubernetes [HorizontalPodAutoscaler Walkthrough](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/). During the following steps, Constellation will spawn new VMs on demand, verify them, add them to the cluster, and delete them again when the load has settled down. + +## Requirements +The cluster needs to be initialized with Kubernetes 1.23 or later. In addition, [autoscaling must be enabled](../../workflows/scale.md) to enable Constellation to assign new nodes dynamically. + +Just for this example specifically, the cluster should have as few worker nodes in the beginning as possible. Start with a small cluster with only *one* low-powered node for the control-plane node and *one* low-powered worker node. + +:::info +We tested the example using instances of types `Standard_DC4as_v5` on Azure and `n2d-standard-4` on GCP. +::: + +## Setup + +1. Install the Kubernetes Metrics Server: + ```bash + kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml + ``` + +2. 
Deploy the HPA example server that's supposed to be scaled under load. + + This manifest is similar to the one from the Kubernetes HPA walkthrough, but with increased CPU limits and requests to facilitate the triggering of node scaling events. + ```bash + cat < + +Online Boutique - Web UI + + + +1. Create a namespace: + ```bash + kubectl create ns boutique + ``` +2. Deploy the application: + ```bash + kubectl apply -n boutique -f https://github.com/GoogleCloudPlatform/microservices-demo/raw/main/release/kubernetes-manifests.yaml + ``` +3. Wait for all services to become available: + ```bash + kubectl wait --for=condition=available --timeout=300s -n boutique --all deployments + ``` +4. Get the frontend's external IP address: + ```shell-session + $ kubectl get service frontend-external -n boutique | awk '{print $4}' + EXTERNAL-IP + + ``` + (`` is a placeholder for the IP assigned by your CSP.) +5. Enter the IP from the result in your browser to browse the online shop. diff --git a/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md b/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md new file mode 100644 index 000000000..de9c66e9b --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/first-steps-local.md @@ -0,0 +1,283 @@ +# First steps with a local cluster + +A local cluster lets you deploy and test Constellation without a cloud subscription. +You have two options: + +* Use MiniConstellation to automatically deploy a two-node cluster. +* For more fine-grained control, create the cluster using the QEMU provider. + +Both options use virtualization to create a local cluster with control-plane nodes and worker nodes. They **don't** require hardware with Confidential VM (CVM) support. For attestation, they currently use a software-based vTPM provided by KVM/QEMU. + +You need an x64 machine with a Linux OS. +You can use a VM, but it needs nested virtualization. 
+ +## Prerequisites + +* Machine requirements: + * An x86-64 CPU with at least 4 cores (6 cores are recommended) + * At least 4 GB RAM (6 GB are recommended) + * 20 GB of free disk space + * Hardware virtualization enabled in the BIOS/UEFI (often referred to as Intel VT-x or AMD-V/SVM) / nested-virtualization support when using a VM +* Software requirements: + * Linux OS with [KVM kernel module](https://www.linux-kvm.org/page/Main_Page) + * Recommended: Ubuntu 22.04 LTS + * [Docker](https://docs.docker.com/engine/install/) + * [xsltproc](https://gitlab.gnome.org/GNOME/libxslt/-/wikis/home) + * (Optional) [virsh](https://www.libvirt.org/manpages/virsh.html) to observe and access your nodes + +### Software installation on Ubuntu + +```bash +# install Docker +curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg +echo "deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt update +sudo apt install docker-ce +# install other dependencies +sudo apt install xsltproc +sudo snap install kubectl --classic +# install Constellation CLI +curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 +sudo install constellation-linux-amd64 /usr/local/bin/constellation +# do not drop forwarded packages +sudo iptables -P FORWARD ACCEPT +``` + +## Create a cluster + + + + + +With the `constellation mini` command, you can deploy and test Constellation locally. This mode is called MiniConstellation. Conceptually, MiniConstellation is similar to [MicroK8s](https://microk8s.io/), [K3s](https://k3s.io/), and [minikube](https://minikube.sigs.k8s.io/docs/). + + +:::caution + +MiniConstellation has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. 
+ +::: + +:::note + +Since MiniConstellation runs on your local system, cloud features such as load balancing, +attaching persistent storage, or autoscaling aren't available. + +::: + +The following creates your MiniConstellation cluster (may take up to 10 minutes to complete): + +```bash +constellation mini up +``` + +This will configure your current directory as the [workspace](../architecture/orchestration.md#workspaces) for this cluster. +All `constellation` commands concerning this cluster need to be issued from this directory. + + + + +With the QEMU provider, you can create a local Constellation cluster as if it were in the cloud. The provider uses [QEMU](https://www.qemu.org/) to create multiple VMs for the cluster nodes, which interact with each other. + +:::caution + +Constellation on QEMU has specific soft- and hardware requirements such as a Linux OS running on an x86-64 CPU. Pay attention to all [prerequisites](#prerequisites) when setting up. + +::: + +:::note + +Since Constellation on QEMU runs on your local system, cloud features such as load balancing, +attaching persistent storage, or autoscaling aren't available. + +::: + +1. To set up your local cluster, you need to create a configuration file for Constellation first. + + ```bash + constellation config generate qemu + ``` + + This creates a [configuration file](../workflows/config.md) for QEMU called `constellation-conf.yaml`. After that, your current folder also becomes your [workspace](../architecture/orchestration.md#workspaces). All `constellation` commands for your cluster need to be executed from this directory. + +2. Now you can create your cluster and its nodes. `constellation create` uses the options set in `constellation-conf.yaml`. + + ```bash + constellation create + ``` + + The output should look like the following: + + ```shell-session + $ constellation create + Your Constellation cluster was created successfully. + ``` + +3. 
Initialize the cluster + + ```bash + constellation init + ``` + + This should give the following output: + + ```shell-session + $ constellation init + Your Constellation master secret was successfully written to ./constellation-mastersecret.json + Note: If you just created the cluster, it can take a few minutes to connect. + Initializing cluster ... + Your Constellation cluster was successfully initialized. + + Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= + Kubernetes configuration constellation-admin.conf + + You can now connect to your cluster by executing: + export KUBECONFIG="$PWD/constellation-admin.conf" + ``` + + The cluster's identifier will be different in your output. + Keep `constellation-mastersecret.json` somewhere safe. + This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. + + :::info + + Depending on your setup, `constellation init` may take 10+ minutes to complete. + + ::: + +4. Configure kubectl + + ```bash + export KUBECONFIG="$PWD/constellation-admin.conf" + ``` + + + + +## Connect to the cluster + +Your cluster initially consists of a single control-plane node: + +```shell-session +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +control-plane-0 Ready control-plane 66s v1.24.6 +``` + +Additional nodes will request to join the cluster shortly. Before each additional node is allowed to join the cluster, its state is verified using remote attestation by the [JoinService](../architecture/microservices.md#joinservice). +If verification passes successfully, the new node receives keys and certificates to join the cluster. 
+ +You can follow this process by viewing the logs of the JoinService: + +```shell-session +$ kubectl logs -n kube-system daemonsets/join-service -f +{"level":"INFO","ts":"2022-10-14T09:32:20Z","caller":"cmd/main.go:48","msg":"Constellation Node Join Service","version":"2.1.0","cloudProvider":"qemu"} +{"level":"INFO","ts":"2022-10-14T09:32:20Z","logger":"validator","caller":"watcher/validator.go:96","msg":"Updating expected measurements"} +... +``` + +Once all nodes have joined your cluster, it may take a couple of minutes for all resources to become available. +You can check on the state of your cluster by running the following: + +```shell-session +$ kubectl get nodes +NAME STATUS ROLES AGE VERSION +control-plane-0 Ready control-plane 2m59s v1.24.6 +worker-0 Ready 32s v1.24.6 +``` + +## Deploy a sample application + +1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) + + ```bash + kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment + ``` + +2. Expose the frontend service locally + + ```bash + kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments + kubectl -n emojivoto port-forward svc/web-svc 8080:80 & + curl http://localhost:8080 + kill %1 + ``` + +## Terminate your cluster + + + + +Once you are done, you can clean up the created resources using the following command: + +```bash +constellation mini down +``` + +This will destroy your cluster and clean up your workspace. +The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. + + + + +Once you are done, you can clean up the created resources using the following command: + +```bash +constellation terminate +``` + +This should give the following output: + +```shell-session +$ constellation terminate +You are about to terminate a Constellation cluster. +All of its associated resources will be DESTROYED. +This action is irreversible and ALL DATA WILL BE LOST. 
+Do you want to continue? [y/n]: +``` + +Confirm with `y` to terminate the cluster: + +```shell-session +Terminating ... +Your Constellation cluster was terminated successfully. +``` + +This will destroy your cluster and clean up your workspace. +The VM image and cluster configuration file (`constellation-conf.yaml`) will be kept and may be reused to create new clusters. + + + + +## Troubleshooting + +Make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). + +### VMs have no internet access / CLI remains in "Initializing cluster" state + +`iptables` rules may prevent your VMs from accessing the internet. +Make sure your rules aren't dropping forwarded packages. + +List your rules: + +```bash +sudo iptables -S +``` + +The output may look similar to the following: + +```shell-session +-P INPUT ACCEPT +-P FORWARD DROP +-P OUTPUT ACCEPT +-N DOCKER +-N DOCKER-ISOLATION-STAGE-1 +-N DOCKER-ISOLATION-STAGE-2 +-N DOCKER-USER +``` + +If your `FORWARD` chain is set to `DROP`, you need to update your rules: + +```bash +sudo iptables -P FORWARD ACCEPT +``` diff --git a/docs/versioned_docs/version-2.11/getting-started/first-steps.md b/docs/versioned_docs/version-2.11/getting-started/first-steps.md new file mode 100644 index 000000000..0b224e04d --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/first-steps.md @@ -0,0 +1,221 @@ +# First steps with Constellation + +The following steps guide you through the process of creating a cluster and deploying a sample app. This example assumes that you have successfully [installed and set up Constellation](install.md), +and have access to a cloud subscription. + +:::tip +If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. 
+::: + +:::note +If you encounter any problem with the following steps, make sure to use the [latest release](https://github.com/edgelesssys/constellation/releases/latest) and check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). +::: + +## Create a cluster + +1. Create the [configuration file](../workflows/config.md) for your cloud provider. + + + + + + ```bash + constellation config generate azure + ``` + + + + + + ```bash + constellation config generate gcp + ``` + + + + + + ```bash + constellation config generate aws + ``` + + + + + +2. Create your [IAM configuration](../workflows/config.md#creating-an-iam-configuration). + + + + + + ```bash + constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest --update-config + ``` + + This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + + Note that CVMs are currently only supported in a few regions, check [Azure's products available by region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). These are: + * `westus` + * `eastus` + * `northeurope` + * `westeurope` + * `southeastasia` + + + + + + ```bash + constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test --update-config + ``` + + This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + + Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. 
You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `C2D` or `N2D`. + + + + + + ```bash + constellation iam create aws --zone=us-east-2a --prefix=constellTest --update-config + ``` + + This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. It also updates the configuration file `constellation-conf.yaml` in your current directory with the IAM values filled in. + + Depending on the attestation variant selected on config generation, different regions are available. + AMD SEV-SNP machines (requires the default attestation variant `awsSEVSNP`) are currently available in the following regions: + * `eu-west-1` + * `us-east-2` + + You can find a list of regions that support AMD SEV-SNP in [AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). + + NitroTPM machines (requires the attestation variant `awsNitroTPM`) are available in all regions. + Constellation OS images are currently replicated to the following regions: + * `eu-central-1` + * `eu-west-1` + * `eu-west-3` + * `us-east-2` + * `ap-south-1` + + If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). + + You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). + + + + + :::tip + To learn about all options you have for managing IAM resources and Constellation configuration, see the [Configuration workflow](../workflows/config.md). + ::: + + + +3. Create the cluster. `constellation create` uses options set in `constellation-conf.yaml`. 
+ If you want to manually use [Terraform](../reference/terraform.md) for managing the cloud resources instead, follow the corresponding instructions in the [Create workflow](../workflows/create.md). + + :::tip + + On Azure, you may need to wait 15+ minutes at this point for role assignments to propagate. + + ::: + + ```bash + constellation create -y + ``` + + This should give the following output: + + ```shell-session + $ constellation create -y + Your Constellation cluster was created successfully. + ``` + +4. Initialize the cluster. + + ```bash + constellation init + ``` + + This should give the following output: + + ```shell-session + $ constellation init + Your Constellation master secret was successfully written to ./constellation-mastersecret.json + Note: If you just created the cluster, it can take a few minutes to connect. + Initializing cluster ... + Your Constellation cluster was successfully initialized. + + Constellation cluster identifier g6iMP5wRU1b7mpOz2WEISlIYSfdAhB0oNaOg6XEwKFY= + Kubernetes configuration constellation-admin.conf + + You can now connect to your cluster by executing: + export KUBECONFIG="$PWD/constellation-admin.conf" + ``` + + The cluster's identifier will be different in your output. + Keep `constellation-mastersecret.json` somewhere safe. + This will allow you to [recover your cluster](../workflows/recovery.md) in case of a disaster. + + :::info + + Depending on your CSP and region, `constellation init` may take 10+ minutes to complete. + + ::: + +5. Configure kubectl. + + ```bash + export KUBECONFIG="$PWD/constellation-admin.conf" + ``` + +## Deploy a sample application + +1. Deploy the [emojivoto app](https://github.com/BuoyantIO/emojivoto) + + ```bash + kubectl apply -k github.com/BuoyantIO/emojivoto/kustomize/deployment + ``` + +2. 
Expose the frontend service locally + + ```bash + kubectl wait --for=condition=available --timeout=60s -n emojivoto --all deployments + kubectl -n emojivoto port-forward svc/web-svc 8080:80 & + curl http://localhost:8080 + kill %1 + ``` + +## Terminate your cluster + +Use the CLI to terminate your cluster. If you manually used [Terraform](../reference/terraform.md) to manage your cloud resources, follow the corresponding instructions in the [Terminate workflow](../workflows/terminate.md). + +```bash +constellation terminate +``` + +This should give the following output: + +```shell-session +$ constellation terminate +You are about to terminate a Constellation cluster. +All of its associated resources will be DESTROYED. +This action is irreversible and ALL DATA WILL BE LOST. +Do you want to continue? [y/n]: +``` + +Confirm with `y` to terminate the cluster: + +```shell-session +Terminating ... +Your Constellation cluster was terminated successfully. +``` + +Optionally, you can also [delete your IAM resources](../workflows/config.md#deleting-an-iam-configuration). diff --git a/docs/versioned_docs/version-2.11/getting-started/install.md b/docs/versioned_docs/version-2.11/getting-started/install.md new file mode 100644 index 000000000..7c313945a --- /dev/null +++ b/docs/versioned_docs/version-2.11/getting-started/install.md @@ -0,0 +1,362 @@ +# Installation and setup + +Constellation runs entirely in your cloud environment and can be controlled via a dedicated command-line interface (CLI). + +The following guides you through the steps of installing the CLI on your machine, verifying it, and connecting it to your cloud service provider (CSP). 
+ +## Prerequisites + +Make sure the following requirements are met: + +- Your machine is running Linux or macOS +- You have admin rights on your machine +- [kubectl](https://kubernetes.io/docs/tasks/tools/) is installed +- Your CSP is Microsoft Azure, Google Cloud Platform (GCP), or Amazon Web Services (AWS) + +## Install the Constellation CLI + +The CLI executable is available at [GitHub](https://github.com/edgelesssys/constellation/releases). +Install it with the following commands: + + + + +1. Download the CLI: + +```bash +curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-amd64 +``` + +2. [Verify the signature](../workflows/verify-cli.md) (optional) + +3. Install the CLI to your PATH: + +```bash +sudo install constellation-linux-amd64 /usr/local/bin/constellation +``` + + + + +1. Download the CLI: + +```bash +curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-linux-arm64 +``` + +2. [Verify the signature](../workflows/verify-cli.md) (optional) + +3. Install the CLI to your PATH: + +```bash +sudo install constellation-linux-arm64 /usr/local/bin/constellation +``` + + + + + + +1. Download the CLI: + +```bash +curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-arm64 +``` + +2. [Verify the signature](../workflows/verify-cli.md) (optional) + +3. Install the CLI to your PATH: + +```bash +sudo install constellation-darwin-arm64 /usr/local/bin/constellation +``` + + + + + + + +1. Download the CLI: + +```bash +curl -LO https://github.com/edgelesssys/constellation/releases/latest/download/constellation-darwin-amd64 +``` + +2. [Verify the signature](../workflows/verify-cli.md) (optional) + +3. Install the CLI to your PATH: + +```bash +sudo install constellation-darwin-amd64 /usr/local/bin/constellation +``` + + + + +:::tip +The CLI supports autocompletion for various shells. 
To set it up, run `constellation completion` and follow the given steps. +::: + +## Set up cloud credentials + +The CLI makes authenticated calls to the CSP API. Therefore, you need to set up Constellation with the credentials for your CSP. + +:::tip +If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. +::: + +### Required permissions + + + + +The following [resource providers need to be registered](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/resource-providers-and-types#register-resource-provider) in your subscription: +* `Microsoft.Attestation` \[2] +* `Microsoft.Compute` +* `Microsoft.Insights` +* `Microsoft.ManagedIdentity` +* `Microsoft.Network` + +By default, Constellation tries to register these automatically if they haven't been registered before. + +To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: +* `*/register/action` \[1] +* `Microsoft.Authorization/roleAssignments/*` +* `Microsoft.Authorization/roleDefinitions/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Resources/subscriptions/resourcegroups/*` + +The built-in `Owner` role is a superset of these permissions. 
+ +To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: +* `Microsoft.Attestation/attestationProviders/*` \[2] +* `Microsoft.Compute/virtualMachineScaleSets/*` +* `Microsoft.Insights/components/*` +* `Microsoft.ManagedIdentity/userAssignedIdentities/*` +* `Microsoft.Network/loadBalancers/*` +* `Microsoft.Network/loadBalancers/backendAddressPools/*` +* `Microsoft.Network/networkSecurityGroups/*` +* `Microsoft.Network/publicIPAddresses/*` +* `Microsoft.Network/virtualNetworks/*` +* `Microsoft.Network/virtualNetworks/subnets/*` +* `Microsoft.Network/natGateways/*` + +The built-in `Contributor` role is a superset of these permissions. + +Follow Microsoft's guide on [understanding](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-definitions) and [assigning roles](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments). + +1: You can omit `*/register/action` if the resource providers mentioned above are already registered and the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable is set to `true` when creating the IAM configuration. + +2: You can omit `Microsoft.Attestation/attestationProviders/*` and the registration of `Microsoft.Attestation` if `EnforceIDKeyDigest` isn't set to `MAAFallback` in the [config file](../workflows/config.md#configure-your-cluster). + + + + +Create a new project for Constellation or use an existing one. +Enable the [Compute Engine API](https://console.cloud.google.com/apis/library/compute.googleapis.com) on it. 
+ +To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: +* `iam.serviceAccountKeys.create` +* `iam.serviceAccountKeys.delete` +* `iam.serviceAccountKeys.get` +* `iam.serviceAccounts.create` +* `iam.serviceAccounts.delete` +* `iam.serviceAccounts.get` +* `resourcemanager.projects.getIamPolicy` +* `resourcemanager.projects.setIamPolicy` + +Together, the built-in roles `roles/editor` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. + +To [create a Constellation cluster](../workflows/create.md#the-create-step), you need the following permissions: +* `compute.addresses.createInternal` +* `compute.addresses.deleteInternal` +* `compute.addresses.get` +* `compute.addresses.useInternal` +* `compute.backendServices.create` +* `compute.backendServices.delete` +* `compute.backendServices.get` +* `compute.backendServices.use` +* `compute.disks.create` +* `compute.firewalls.create` +* `compute.firewalls.delete` +* `compute.firewalls.get` +* `compute.globalAddresses.create` +* `compute.globalAddresses.delete` +* `compute.globalAddresses.get` +* `compute.globalAddresses.use` +* `compute.globalForwardingRules.create` +* `compute.globalForwardingRules.delete` +* `compute.globalForwardingRules.get` +* `compute.globalForwardingRules.setLabels` +* `compute.globalOperations.get` +* `compute.healthChecks.create` +* `compute.healthChecks.delete` +* `compute.healthChecks.get` +* `compute.healthChecks.useReadOnly` +* `compute.instanceGroupManagers.create` +* `compute.instanceGroupManagers.delete` +* `compute.instanceGroupManagers.get` +* `compute.instanceGroups.create` +* `compute.instanceGroups.delete` +* `compute.instanceGroups.get` +* `compute.instanceGroups.use` +* `compute.instances.create` +* `compute.instances.setLabels` +* `compute.instances.setMetadata` +* `compute.instances.setTags` +* `compute.instanceTemplates.create` +* `compute.instanceTemplates.delete` +* 
`compute.instanceTemplates.get` +* `compute.instanceTemplates.useReadOnly` +* `compute.networks.create` +* `compute.networks.delete` +* `compute.networks.get` +* `compute.networks.updatePolicy` +* `compute.routers.create` +* `compute.routers.delete` +* `compute.routers.get` +* `compute.routers.update` +* `compute.subnetworks.create` +* `compute.subnetworks.delete` +* `compute.subnetworks.get` +* `compute.subnetworks.use` +* `compute.targetTcpProxies.create` +* `compute.targetTcpProxies.delete` +* `compute.targetTcpProxies.get` +* `compute.targetTcpProxies.use` +* `iam.serviceAccounts.actAs` + +Together, the built-in roles `roles/editor`, `roles/compute.instanceAdmin` and `roles/resourcemanager.projectIamAdmin` form a superset of these permissions. + +Follow Google's guide on [understanding](https://cloud.google.com/iam/docs/understanding-roles) and [assigning roles](https://cloud.google.com/iam/docs/granting-changing-revoking-access). + + + + +To set up a Constellation cluster, you need to perform two tasks that require permissions: create the infrastructure and create roles for cluster nodes. Both of these actions can be performed by different users, e.g., an administrator to create roles and a DevOps engineer to create the infrastructure. 
+ +To [create the IAM configuration](../workflows/config.md#creating-an-iam-configuration) for Constellation, you need the following permissions: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:DescribeAccountAttributes", + "iam:AddRoleToInstanceProfile", + "iam:AttachRolePolicy", + "iam:CreateInstanceProfile", + "iam:CreatePolicy", + "iam:CreateRole", + "iam:DeleteInstanceProfile", + "iam:DeletePolicy", + "iam:DeletePolicyVersion", + "iam:DeleteRole", + "iam:DetachRolePolicy", + "iam:GetInstanceProfile", + "iam:GetPolicy", + "iam:GetPolicyVersion", + "iam:GetRole", + "iam:ListAttachedRolePolicies", + "iam:ListInstanceProfilesForRole", + "iam:ListPolicyVersions", + "iam:ListRolePolicies", + "iam:PassRole", + "iam:RemoveRoleFromInstanceProfile", + "sts:GetCallerIdentity" + ], + "Resource": "*" + } + ] +} +``` + +The built-in `AdministratorAccess` policy is a superset of these permissions. + +To [create a Constellation cluster](../workflows/create.md#the-create-step), see the permissions of [main.tf](https://github.com/edgelesssys/constellation/blob/main/cli/internal/terraform/terraform/iam/aws/main.tf). + + +The built-in `PowerUserAccess` policy is a superset of these permissions. + +Follow Amazon's guide on [understanding](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) and [managing policies](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html). + + + + +### Authentication + +You need to authenticate with your CSP. The following lists the required steps for *testing* and *production* environments. + +:::note +The steps for a *testing* environment are simpler. However, they may expose secrets to the CSP. If in doubt, follow the *production* steps. +::: + + + + +**Testing** + +Simply open the [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/overview). 
+ +**Production** + +Use the latest version of the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) on a trusted machine: + +```bash +az login +``` + +Other options are described in Azure's [authentication guide](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + + + +**Testing** + +You can use the [Google Cloud Shell](https://cloud.google.com/shell). Make sure your [session is authorized](https://cloud.google.com/shell/docs/auth). For example, execute `gsutil` and accept the authorization prompt. + +**Production** + +Use one of the following options on a trusted machine: + +- Use the [`gcloud` CLI](https://cloud.google.com/sdk/gcloud) + + ```bash + gcloud auth application-default login + ``` + + This will ask you to log-in to your Google account and create your credentials. + The Constellation CLI will automatically load these credentials when needed. + +- Set up a service account and pass the credentials manually + + Follow [Google's guide](https://cloud.google.com/docs/authentication/production#manually) for setting up your credentials. + + + + +**Testing** + +You can use the [AWS CloudShell](https://console.aws.amazon.com/cloudshell/home). Make sure you are [authorized to use it](https://docs.aws.amazon.com/cloudshell/latest/userguide/sec-auth-with-identities.html). + +**Production** + +Use the latest version of the [AWS CLI](https://aws.amazon.com/cli/) on a trusted machine: + +```bash +aws configure +``` + +Options and first steps are described in the [AWS CLI documentation](https://docs.aws.amazon.com/cli/index.html). + + + + + + +## Next steps + +You are now ready to [deploy your first confidential Kubernetes cluster and application](first-steps.md). 
diff --git a/docs/versioned_docs/version-2.11/intro.md b/docs/versioned_docs/version-2.11/intro.md new file mode 100644 index 000000000..0bfe86da9 --- /dev/null +++ b/docs/versioned_docs/version-2.11/intro.md @@ -0,0 +1,34 @@ +--- +slug: / +id: intro +--- +# Introduction + +Welcome to the documentation of Constellation! Constellation is a Kubernetes engine that aims to provide the best possible data security. + +![Constellation concept](/img/concept.svg) + + Constellation shields your entire Kubernetes cluster from the underlying cloud infrastructure. Everything inside is always encrypted, including at runtime in memory. For this, Constellation leverages a technology called *confidential computing* and more specifically Confidential VMs. + +:::tip +See the 📄[whitepaper](https://content.edgeless.systems/hubfs/Confidential%20Computing%20Whitepaper.pdf) for more information on confidential computing. +::: + +## Goals + +From a security perspective, Constellation is designed to keep all data always encrypted and to prevent any access from the underlying (cloud) infrastructure. This includes access from datacenter employees, privileged cloud admins, and attackers coming through the infrastructure. Such attackers could be malicious co-tenants escalating their privileges or hackers who managed to compromise a cloud server. + +From a DevOps perspective, Constellation is designed to work just like what you would expect from a modern Kubernetes engine. + +## Use cases + +Constellation provides unique security [features](overview/confidential-kubernetes.md) and [benefits](overview/security-benefits.md). 
The core use cases are: + +* Increasing the overall security of your clusters +* Increasing the trustworthiness of your SaaS offerings +* Moving sensitive workloads from on-prem to the cloud +* Meeting regulatory requirements + +## Next steps + +You can learn more about the concept of Confidential Kubernetes, features, security benefits, and performance of Constellation in the *Basics* section. To jump right into the action head to *Getting started*. diff --git a/docs/versioned_docs/version-2.11/overview/clouds.md b/docs/versioned_docs/version-2.11/overview/clouds.md new file mode 100644 index 000000000..3ccbb0d6d --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/clouds.md @@ -0,0 +1,58 @@ +# Feature status of clouds + +What works on which cloud? Currently, Confidential VMs (CVMs) are available in varying quality on the different clouds and software stacks. + +For Constellation, the ideal environment provides the following: + +1. Ability to run arbitrary software and images inside CVMs +2. CVMs based on AMD SEV-SNP (available in EPYC CPUs since the Milan generation) or, in the future, Intel TDX (available in Xeon CPUs from the Sapphire Rapids generation onward) +3. Ability for CVM guests to obtain raw hardware attestation statements +4. Reviewable, open-source firmware inside CVMs +5. Capability of the firmware to attest the integrity of the code it passes control to, e.g., with an embedded virtual TPM (vTPM) + +(1) is a functional must-have. (2)--(5) are required for remote attestation that fully keeps the infrastructure/cloud out. Constellation can work without them or with approximations, but won't protect against certain privileged attackers anymore. + +The following table summarizes the state of features for different infrastructures as of June 2023. + +| **Feature** | **Azure** | **GCP** | **AWS** | **OpenStack (Yoga)** | +|-----------------------------------|-----------|---------|---------|----------------------| +| **1. 
Custom images** | Yes | Yes | Yes | Yes | +| **2. SEV-SNP or TDX** | Yes | Yes | Yes | Depends on kernel/HV | +| **3. Raw guest attestation** | Yes | Yes | Yes | Depends on kernel/HV | +| **4. Reviewable firmware** | No* | No | Yes | Depends on kernel/HV | +| **5. Confidential measured boot** | Yes | No | No | Depends on kernel/HV | + +## Microsoft Azure + +With its [CVM offering](https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview), Azure provides the best foundations for Constellation. +Regarding (3), Azure provides direct access to remote-attestation statements. +The CVM firmware running in VM Privilege Level (VMPL) 0 provides a vTPM (5), but it's closed source (4). +This firmware is signed by Azure. +The signature is reflected in the remote-attestation statements of CVMs. +Thus, the Azure closed-source firmware becomes part of Constellation's trusted computing base (TCB). + +\* Recently, Azure [announced](https://techcommunity.microsoft.com/t5/azure-confidential-computing/azure-confidential-vms-using-sev-snp-dcasv5-ecasv5-are-now/ba-p/3573747) the *limited preview* of CVMs with customizable firmware. With this CVM type, (4) switches from *No* to *Yes*. Constellation will support customizable firmware on Azure in the future. + +## Google Cloud Platform (GCP) + +The [CVMs Generally Available in GCP](https://cloud.google.com/compute/confidential-vm/docs/create-confidential-vm-instance) are based on AMD SEV but don't have SNP features enabled. +CVMs with SEV-SNP enabled are currently in [private preview](https://cloud.google.com/blog/products/identity-security/rsa-snp-vm-more-confidential). Regarding (3), with their SEV-SNP offering Google provides direct access to remote-attestation statements. +However, regarding (4), the CVMs still include closed-source firmware. 
+ +Intel and Google have [collaborated](https://cloud.google.com/blog/products/identity-security/rsa-google-intel-confidential-computing-more-secure) to enhance the security of TDX, and have recently [revealed](https://venturebeat.com/security/intel-launches-confidential-computing-solution-for-virtual-machines/) their plans to make TDX compatible with Google Cloud. + +## Amazon Web Services (AWS) + +Amazon EC2 [supports AMD SEV-SNP](https://aws.amazon.com/de/about-aws/whats-new/2023/04/amazon-ec2-amd-sev-snp/). +Regarding (3), AWS provides direct access to remote-attestation statements. +However, regarding (5), attestation is partially based on the [NitroTPM](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nitrotpm.html) for [measured boot](../architecture/attestation.md#measured-boot), which is a vTPM managed by the Nitro hypervisor. +Hence, the hypervisor is currently part of Constellation's TCB. +Regarding (4), the [firmware is open source](https://github.com/aws/uefi) and can be reproducibly built. + +## OpenStack + +OpenStack is an open-source cloud and infrastructure management software. It's used by many smaller CSPs and datacenters. In the latest *Yoga* version, OpenStack has basic support for CVMs. However, much depends on the employed kernel and hypervisor. Features (2)--(4) are likely to be a *Yes* with Linux kernel version 6.2. Thus, going forward, OpenStack on corresponding AMD or Intel hardware will be a viable underpinning for Constellation. + +## Conclusion + +The different clouds and software like the Linux kernel and OpenStack are in the process of building out their support for state-of-the-art CVMs. Azure has already most features in place. For Constellation, the status quo means that the TCB has different shapes on different infrastructures. With broad SEV-SNP support coming to the Linux kernel, we soon expect a normalization of features across infrastructures. 
diff --git a/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md b/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md new file mode 100644 index 000000000..2b6c6ed17 --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/confidential-kubernetes.md @@ -0,0 +1,42 @@ +# Confidential Kubernetes + +We use the term *Confidential Kubernetes* to refer to the concept of using confidential-computing technology to shield entire Kubernetes clusters from the infrastructure. The three defining properties of this concept are: + +1. **Workload shielding**: the confidentiality and integrity of all workload-related data and code are enforced. +2. **Control plane shielding**: the confidentiality and integrity of the cluster's control plane, state, and workload configuration are enforced. +3. **Attestation and verifiability**: the two properties above can be verified remotely based on hardware-rooted cryptographic certificates. + +Each of the above properties is equally important. Only with all three in conjunction, an entire cluster can be shielded without gaps. + +## Constellation security features + +Constellation implements the Confidential Kubernetes concept with the following security features. + +* **Runtime encryption**: Constellation runs all Kubernetes nodes inside Confidential VMs (CVMs). This gives runtime encryption for the entire cluster. +* **Network and storage encryption**: Constellation augments this with transparent encryption of the [network](../architecture/networking.md) and [persistent storage](../architecture/encrypted-storage.md). Thus, workloads and control plane are truly end-to-end encrypted: at rest, in transit, and at runtime. +* **Transparent key management**: Constellation manages the corresponding [cryptographic keys](../architecture/keys.md) inside CVMs. 
+* **Node attestation and verification**: Constellation verifies the integrity of each new CVM-based node using [remote attestation](../architecture/attestation.md). Only "good" nodes receive the cryptographic keys required to access the network and storage of a cluster. +* **Confidential computing-optimized images**: A node is "good" if it's running a signed Constellation [node image](../architecture/images.md) inside a CVM and is in the expected state. (Node images are hardware-measured during boot. The measurements are reflected in the attestation statements that are produced by nodes and verified by Constellation.) +* **"Whole cluster" attestation**: Towards the DevOps engineer, Constellation provides a single hardware-rooted certificate from which all of the above can be verified. + +With the above, Constellation wraps an entire cluster into one coherent and verifiable *confidential context*. The concept is depicted in the following. + +![Confidential Kubernetes](../_media/concept-constellation.svg) + +## Contrast: Managed Kubernetes with CVMs + +In contrast, managed Kubernetes with CVMs, as it's for example offered in [AKS](https://azure.microsoft.com/en-us/services/kubernetes-service/) and [GKE](https://cloud.google.com/kubernetes-engine), only provides runtime encryption for certain worker nodes. Here, each worker node is a separate (and typically unverified) confidential context. This only provides limited security benefits as it only prevents direct access to a worker node's memory. The large majority of potential attacks through the infrastructure remain unaffected. This includes attacks through the control plane, access to external key management, and the corruption of worker node images. This leaves many problems unsolved. For instance, *Node A* has no means to verify if *Node B* is "good" and if it's OK to share data with it. Consequently, this approach leaves a large attack surface, as is depicted in the following. 
+ +![Concept: Managed Kubernetes plus CVMs](../_media/concept-managed.svg) + +The following table highlights the key differences in terms of features. + +| | Managed Kubernetes with CVMs | Confidential Kubernetes (Constellation✨) | +|-------------------------------------|------------------------------|--------------------------------------------| +| Runtime encryption | Partial (data plane only)| **Yes** | +| Node image verification | No | **Yes** | +| Full cluster attestation | No | **Yes** | +| Transparent network encryption | No | **Yes** | +| Transparent storage encryption | No | **Yes** | +| Confidential key management | No | **Yes** | +| Cloud agnostic / multi-cloud | No | **Yes** | diff --git a/docs/versioned_docs/version-2.11/overview/license.md b/docs/versioned_docs/version-2.11/overview/license.md new file mode 100644 index 000000000..330eb0166 --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/license.md @@ -0,0 +1,23 @@ +# License + +## Source code + +Constellation's source code is available on [GitHub](https://github.com/edgelesssys/constellation) under the [GNU Affero General Public License v3.0](https://github.com/edgelesssys/constellation/blob/main/LICENSE). + +## Binaries + +Edgeless Systems provides ready-to-use and [signed](../architecture/attestation.md#chain-of-trust) binaries of Constellation. This includes the CLI and the [node images](../architecture/images.md). + +These binaries may be used free of charge within the bounds of Constellation's [**Community License**](#community-license). An [**Enterprise License**](#enterprise-license) can be purchased from Edgeless Systems. + +The Constellation CLI displays relevant license information when you initialize your cluster. You are responsible for staying within the bounds of your respective license. Constellation doesn't enforce any limits so as not to endanger your cluster's availability. 
+ +### Community License + +You are free to use the Constellation binaries provided by Edgeless Systems to create services for internal consumption, evaluation purposes, or non-commercial use. You must not use the Constellation binaries to provide commercial hosted services to third parties. Edgeless Systems gives no warranties and offers no support. + +### Enterprise License + +Enterprise Licenses don't have the above limitations and come with support and additional features. Find out more at the [product website](https://www.edgeless.systems/products/constellation/). + +Once you have received your Enterprise License file, place it in your [Constellation workspace](../architecture/orchestration.md#workspaces) in a file named `constellation.license`. diff --git a/docs/versioned_docs/version-2.11/overview/performance/application.md b/docs/versioned_docs/version-2.11/overview/performance/application.md new file mode 100644 index 000000000..c67d59644 --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/performance/application.md @@ -0,0 +1,102 @@ +# Application benchmarks + +## HashiCorp Vault + +[HashiCorp Vault](https://www.vaultproject.io/) is a distributed secrets management software that can be deployed to Kubernetes. +HashiCorp maintains a benchmarking tool for vault, [vault-benchmark](https://github.com/hashicorp/vault-benchmark/). +Vault-benchmark generates load on a Vault deployment and measures response times. + +This article describes the results from running vault-benchmark on Constellation, AKS, and GKE. +You can find the setup for producing the data discussed in this article in the [vault-benchmarks](https://github.com/edgelesssys/vault-benchmarks) repository. + +The Vault API used during benchmarking is the [transits secret engine](https://developer.hashicorp.com/vault/docs/secrets/transit). +This allows services to send data to Vault for encryption, decryption, signing, and verification. 
+ +## Results + +On each run, vault-benchmark sends requests and measures the latencies. +The measured latencies are aggregated through various statistical features. +After running the benchmark n times, the arithmetic mean over a subset of the reported statistics is calculated. +The selected features are arithmetic mean, 99th percentile, minimum, and maximum. + +Arithmetic mean gives a general sense of the latency on each target. +The 99th percentile shows performance in (most likely) erroneous states. +Minimum and maximum mark the range within which latency varies each run. + +The benchmark was configured with 1300 workers and 10 seconds per run. +Those numbers were chosen empirically. +The latency was stabilizing at 10 seconds runtime, not changing with further increase. +Increasing the number of workers beyond 1300 leads to request failures, marking the limit Vault was able to handle in this setup. +All results are based on 100 runs. + +The following data was generated while running five replicas, one primary, and four standby nodes. +All numbers are in seconds if not indicated otherwise. 
+``` +========== Results AKS ========== +Mean: mean: 1.632200, variance: 0.002057 +P99: mean: 5.480679, variance: 2.263700 +Max: mean: 6.651001, variance: 2.808401 +Min: mean: 0.011415, variance: 0.000133 +========== Results GKE ========== +Mean: mean: 1.656435, variance: 0.003615 +P99: mean: 6.030807, variance: 3.955051 +Max: mean: 7.164843, variance: 3.300004 +Min: mean: 0.010233, variance: 0.000111 +========== Results C11n ========== +Mean: mean: 1.651549, variance: 0.001610 +P99: mean: 5.780422, variance: 3.016106 +Max: mean: 6.942997, variance: 3.075796 +Min: mean: 0.013774, variance: 0.000228 +========== AKS vs C11n ========== +Mean: +1.171577 % (AKS is faster) +P99: +5.185495 % (AKS is faster) +Max: +4.205618 % (AKS is faster) +Min: +17.128781 % (AKS is faster) +========== GKE vs C11n ========== +Mean: -0.295851 % (GKE is slower) +P99: -4.331603 % (GKE is slower) +Max: -3.195248 % (GKE is slower) +Min: +25.710886 % (GKE is faster) +``` + +**Interpretation**: Latencies are all within ~5% of each other. +AKS performs slightly better than GKE and Constellation (C11n) in all cases except minimum latency. +Minimum latency is the lowest for GKE. +Compared to GKE, Constellation had slightly lower peak latencies (99th percentile and maximum), indicating that Constellation could have handled slightly more concurrent accesses than GKE. +Overall, performance is at comparable levels across all three distributions. +Based on these numbers, you can use a similarly sized Constellation cluster to run your existing Vault deployment. + +### Visualization + +The following plots visualize the data presented above as [box plots](https://en.wikipedia.org/wiki/Box_plot). +The whiskers denote the minimum and maximum. +The box stretches from the 25th to the 75th percentile, with the dividing bar marking the 50th percentile. +The circles outside the whiskers denote outliers. + +
+Mean Latency + +![Mean Latency](../../_media/benchmark_vault/5replicas/mean_latency.png) + +
+ +
+99th Percentile Latency + +![99th Percentile Latency](../../_media/benchmark_vault/5replicas/p99_latency.png) + +
+ +
+Maximum Latency + +![Maximum Latency](../../_media/benchmark_vault/5replicas/max_latency.png) + +
+ +
+Minimum Latency + +![Minimum Latency](../../_media/benchmark_vault/5replicas/min_latency.png) + +
diff --git a/docs/versioned_docs/version-2.11/overview/performance/io.md b/docs/versioned_docs/version-2.11/overview/performance/io.md new file mode 100644 index 000000000..dc7cf3d8b --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/performance/io.md @@ -0,0 +1,204 @@ +# I/O performance benchmarks + +To assess the overall performance of Constellation, this benchmark evaluates Constellation v2.6.0 in terms of storage I/O using [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) and network performance using the [Kubernetes Network Benchmark](https://github.com/InfraBuilder/k8s-bench-suite#knb--kubernetes-network-be). + +This benchmark tested Constellation on Azure and GCP and compared the results against the managed Kubernetes offerings AKS and GKE. + +## Configurations + +### Constellation + +The benchmark was conducted with Constellation v2.6.0, Kubernetes v1.25.7, and Cilium v1.12. +It ran on the following infrastructure configurations. + +Constellation on Azure: + +- Nodes: 3 (1 Control-plane, 2 Worker) +- Machines: `DC4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory +- CVM: `true` +- Region: `West US` +- Zone: `2` + +Constellation on GCP: + +- Nodes: 3 (1 Control-plane, 2 Worker) +- Machines: `n2d-standard-4`: 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory +- CVM: `true` +- Zone: `europe-west3-b` + +### AKS + +On AKS, the benchmark used Kubernetes `v1.24.9` and nodes with version `AKSUbuntu-1804gen2containerd-2023.02.15`. +AKS ran with the [`kubenet`](https://learn.microsoft.com/en-us/azure/aks/concepts-network#kubenet-basic-networking) CNI and the [default CSI driver](https://learn.microsoft.com/en-us/azure/aks/azure-disk-csi) for Azure Disk. 
+ +The following infrastructure configurations was used: + +- Nodes: 2 (2 Worker) +- Machines: `D4as_v5`: 3rd Generation AMD EPYC 7763v (Milan) processor with 4 Cores, 16 GiB memory +- CVM: `false` +- Region: `West US` +- Zone: `2` + +### GKE + +On GKE, the benchmark used Kubernetes `v1.24.9` and nodes with version `1.24.9-gke.3200`. +GKE ran with the [`kubenet`](https://cloud.google.com/kubernetes-engine/docs/concepts/network-overview) CNI and the [default CSI driver](https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver) for Compute Engine persistent disk. + +The following infrastructure configurations was used: + +- Nodes: 2 (2 Worker) +- Machines: `n2d-standard-4` 2nd Generation AMD EPYC (Rome) processor with 4 Cores, 16 GiB of memory +- CVM: `false` +- Zone: `europe-west3-b` + +## Results + +### Network + +This section gives a thorough analysis of the network performance of Constellation, specifically focusing on measuring TCP and UDP bandwidth. +The benchmark measured the bandwidth of pod-to-pod and pod-to-service connections between two different nodes using [`iperf`](https://iperf.fr/). + +GKE and Constellation on GCP had a maximum network bandwidth of [10 Gbps](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machineshttps://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines). +AKS with `Standard_D4as_v5` machines a maximum network bandwidth of [12.5 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series). +The Confidential VM equivalent `Standard_DC4as_v5` currently has a network bandwidth of [1.25 Gbps](https://learn.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series#dcasv5-series-products). +Therefore, to make the test comparable, both AKS and Constellation on Azure were running with `Standard_DC4as_v5` machines and 1.25 Gbps bandwidth. + +Constellation on Azure and AKS used an MTU of 1500. 
+Constellation on GCP used an MTU of 8896. GKE used an MTU of 1450. + +The difference in network bandwidth can largely be attributed to two factors. + +- Constellation's [network encryption](../../architecture/networking.md) via Cilium and WireGuard, which protects data in-transit. +- [AMD SEV using SWIOTLB bounce buffers](https://lore.kernel.org/all/20200204193500.GA15564@ashkalra_ubuntu_server/T/) for all DMA including network I/O. + +#### Pod-to-Pod + +In this scenario, the client Pod connects directly to the server pod via its IP address. + +```mermaid +flowchart LR + subgraph Node A + Client[Client] + end + subgraph Node B + Server[Server] + end + Client ==>|traffic| Server +``` + +The results for "Pod-to-Pod" on Azure are as follows: + +![Network Pod2Pod Azure benchmark graph](../../_media/benchmark_net_p2p_azure.png) + +The results for "Pod-to-Pod" on GCP are as follows: + +![Network Pod2Pod GCP benchmark graph](../../_media/benchmark_net_p2p_gcp.png) + +#### Pod-to-Service + +In this scenario, the client Pod connects to the server Pod via a ClusterIP service. This is more relevant to real-world use cases. + +```mermaid +flowchart LR + subgraph Node A + Client[Client] ==>|traffic| Service[Service] + end + subgraph Node B + Server[Server] + end + Service ==>|traffic| Server +``` + +The results for "Pod-to-Pod" on Azure are as follows: + +![Network Pod2SVC Azure benchmark graph](../../_media/benchmark_net_p2svc_azure.png) + +The results for "Pod-to-Pod" on GCP are as follows: + +![Network Pod2SVC GCP benchmark graph](../../_media/benchmark_net_p2svc_gcp.png) + +In our recent comparison of Constellation on GCP with GKE, Constellation has 58% less TCP bandwidth. However, UDP bandwidth was slightly better with Constellation, thanks to its higher MTU. + +Similarly, when comparing Constellation on Azure with AKS using CVMs, Constellation achieved approximately 10% less TCP and 40% less UDP bandwidth. 
+ +### Storage I/O + +Azure and GCP offer persistent storage for their Kubernetes services AKS and GKE via the Container Storage Interface (CSI). CSI storage in Kubernetes is available via `PersistentVolumes` (PV) and consumed via `PersistentVolumeClaims` (PVC). +Upon requesting persistent storage through a PVC, GKE and AKS will provision a PV as defined by a default [storage class](https://kubernetes.io/docs/concepts/storage/storage-classes/). +Constellation provides persistent storage on Azure and GCP [that's encrypted on the CSI layer](../../architecture/encrypted-storage.md). +Similarly, upon a PVC request, Constellation will provision a PV via a default storage class. + +For Constellation on Azure and AKS, the benchmark ran with Azure Disk storage [Standard SSD](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) of 400 GiB size. +The [DC4as machine type](https://learn.microsoft.com/en-us/azure/virtual-machines/dasv5-dadsv5-series#dasv5-series) with four cores provides the following maximum performance: + +- 6400 (20000 burst) IOPS +- 144 MB/s (600 MB/s burst) throughput + +However, the performance is bound by the capabilities of the [512 GiB Standard SSD size](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) (the size class of 400 GiB volumes): + +- 500 (600 burst) IOPS +- 60 MB/s (150 MB/s burst) throughput + +For Constellation on GCP and GKE, the benchmark ran with Compute Engine Persistent Disk Storage [pd-balanced](https://cloud.google.com/compute/docs/disks) of 400 GiB size. 
+The N2D machine type with four cores and pd-balanced provides the following [maximum performance](https://cloud.google.com/compute/docs/disks/performance#n2d_vms): + +- 3,000 read IOPS +- 15,000 write IOPS +- 240 MB/s read throughput +- 240 MB/s write throughput + +However, the performance is bound by the capabilities of a [`Zonal balanced PD`](https://cloud.google.com/compute/docs/disks/performance#zonal-persistent-disks) with 400 GiB size: + +- 2400 read IOPS +- 2400 write IOPS +- 112 MB/s read throughput +- 112 MB/s write throughput + +The [`fio`](https://fio.readthedocs.io/en/latest/fio_doc.html) benchmark consists of several tests. +The benchmark used [`Kubestr`](https://github.com/kastenhq/kubestr) to run `fio` in Kubernetes. +The default test performs randomized access patterns that accurately depict worst-case I/O scenarios for most applications. + +The following `fio` settings were used: + +- No Cloud caching +- No OS caching +- Single CPU +- 60 seconds runtime +- 10 seconds ramp-up time +- 10 GiB file +- IOPS: 4 KB blocks and 128 iodepth +- Bandwidth: 1024 KB blocks and 128 iodepth + +For more details, see the [`fio` test configuration](https://github.com/edgelesssys/constellation/blob/main/.github/actions/e2e_benchmark/fio.ini). + +The results for IOPS on Azure are as follows: + +![I/O IOPS Azure benchmark graph](../../_media/benchmark_fio_azure_iops.png) + +The results for IOPS on GCP are as follows: + +![I/O IOPS GCP benchmark graph](../../_media/benchmark_fio_gcp_iops.png) + +The results for bandwidth on Azure are as follows: + +![I/O bandwidth Azure benchmark graph](../../_media/benchmark_fio_azure_bw.png) + +The results for bandwidth on GCP are as follows: + +![I/O bandwidth GCP benchmark graph](../../_media/benchmark_fio_gcp_bw.png) + +On GCP, the results exceed the maximum performance guarantees of the chosen disk type. There are two possible explanations for this. The first is that there may be cloud caching in place that isn't configurable. 
Alternatively, the underlying provisioned disk size may be larger than what was requested, resulting in higher performance boundaries. + +When comparing Constellation on GCP with GKE, Constellation has similar bandwidth but about 10% less IOPS performance. On Azure, Constellation has similar IOPS performance compared to AKS, where both likely hit the maximum storage performance. However, Constellation has approximately 15% less read and write bandwidth. + +## Conclusion + +Despite the added [security benefits](../security-benefits.md) that Constellation provides, it only incurs a slight performance overhead when compared to managed Kubernetes offerings such as AKS and GKE. In most compute benchmarks, Constellation is on par with it's alternatives. +While it may be slightly slower in certain I/O scenarios due to network and storage encryption, there is ongoing work to reduce this overhead to single digits. + +For instance, storage encryption only adds between 10% to 15% overhead in terms of bandwidth and IOPS. +Meanwhile, the biggest performance impact that Constellation currently faces is network encryption, which can incur up to 58% overhead on a 10 Gbps network. +However, the Cilium team has conducted [benchmarks with Cilium using WireGuard encryption](https://docs.cilium.io/en/latest/operations/performance/benchmark/#encryption-wireguard-ipsec) on a 100 Gbps network that yielded over 15 Gbps. +We're confident that Constellation will provide a similar level of performance with an upcoming release. + +Overall, Constellation strikes a great balance between security and performance, and we're continuously working to improve its performance capabilities while maintaining its high level of security. 
diff --git a/docs/versioned_docs/version-2.11/overview/performance/performance.md b/docs/versioned_docs/version-2.11/overview/performance/performance.md new file mode 100644 index 000000000..7f22a693e --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/performance/performance.md @@ -0,0 +1,25 @@ +# Performance analysis of Constellation + +This section provides a comprehensive examination of the performance characteristics of Constellation, encompassing various aspects, including runtime encryption, I/O benchmarks, and real-world applications. + +## Impact of runtime encryption on performance + +All nodes in a Constellation cluster are executed inside Confidential VMs (CVMs). Consequently, the performance of Constellation is inherently linked to the performance of these CVMs. + +### AMD and Azure benchmarking + +AMD and Azure have collectively released a [performance benchmark](https://community.amd.com/t5/business/microsoft-azure-confidential-computing-powered-by-3rd-gen-epyc/ba-p/497796) for CVMs that utilize 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. This benchmark, which included a variety of mostly compute-intensive tests such as SPEC CPU 2017 and CoreMark, demonstrated that CVMs experience only minor performance degradation (ranging from 2% to 8%) when compared to standard VMs. Such results are indicative of the performance that can be expected from compute-intensive workloads running with Constellation on Azure. + +### AMD and Google benchmarking + +Similarly, AMD and Google have jointly released a [performance benchmark](https://www.amd.com/system/files/documents/3rd-gen-epyc-gcp-c2d-conf-compute-perf-brief.pdf) for CVMs employing 3rd Gen AMD EPYC processors (Milan) with SEV-SNP. With high-performance computing workloads such as WRF, NAMD, Ansys CFS, and Ansys LS_DYNA, they observed analogous findings, with only minor performance degradation (between 2% and 4%) compared to standard VMs. 
These outcomes are reflective of the performance that can be expected for compute-intensive workloads running with Constellation on GCP. + +## I/O performance benchmarks + +We evaluated the [I/O performance](io.md) of Constellation, utilizing a collection of synthetic benchmarks targeting networking and storage. +We further compared this performance to native managed Kubernetes offerings from various cloud providers, to better understand how Constellation stands in relation to standard practices. + +## Application benchmarking + +To gauge Constellation's applicability to well-known applications, we performed a [benchmark of HashiCorp Vault](application.md) running on Constellation. +The results were then compared to deployments on the managed Kubernetes offerings from different cloud providers, providing a tangible perspective on Constellation's performance in actual deployment scenarios. diff --git a/docs/versioned_docs/version-2.11/overview/product.md b/docs/versioned_docs/version-2.11/overview/product.md new file mode 100644 index 000000000..ba7181aa9 --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/product.md @@ -0,0 +1,11 @@ +# Product features + +Constellation is a Kubernetes engine that aims to provide the best possible data security in combination with enterprise-grade scalability and reliability features---and a smooth user experience. + +From a security perspective, Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and corresponding security features, which shield your entire cluster from the underlying infrastructure. + +From an operational perspective, Constellation provides the following key features: + +* **Native support for different clouds**: Constellation works on Microsoft Azure, Google Cloud Platform (GCP), and Amazon Web Services (AWS). Support for OpenStack-based environments is coming with a future release. 
Constellation securely interfaces with the cloud infrastructure to provide [cluster autoscaling](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), [dynamic persistent volumes](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/), and [service load balancing](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +* **High availability**: Constellation uses a [multi-master architecture](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/) with a [stacked etcd topology](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/ha-topology/#stacked-etcd-topology) to ensure high availability. +* **Integrated Day-2 operations**: Constellation lets you securely [upgrade](../workflows/upgrade.md) your cluster to a new release. It also lets you securely [recover](../workflows/recovery.md) a failed cluster. Both with a single command. diff --git a/docs/versioned_docs/version-2.11/overview/security-benefits.md b/docs/versioned_docs/version-2.11/overview/security-benefits.md new file mode 100644 index 000000000..51a8b64f5 --- /dev/null +++ b/docs/versioned_docs/version-2.11/overview/security-benefits.md @@ -0,0 +1,22 @@ +# Security benefits and threat model + +Constellation implements the [Confidential Kubernetes](confidential-kubernetes.md) concept and shields entire Kubernetes deployments from the infrastructure. More concretely, Constellation decreases the size of the trusted computing base (TCB) of a Kubernetes deployment. The TCB is the totality of elements in a computing environment that must be trusted not to be compromised. A smaller TCB results in a smaller attack surface. The following diagram shows how Constellation removes the *cloud & datacenter infrastructure* and the *physical hosts*, including the hypervisor, the host OS, and other components, from the TCB (red). 
Inside the confidential context (green), Kubernetes remains part of the TCB, but its integrity is attested and can be [verified](../workflows/verify-cluster.md). + +![TCB comparison](../_media/tcb.svg) + +Given this background, the following describes the concrete threat classes that Constellation addresses. + +## Insider access + +Employees and third-party contractors of cloud service providers (CSPs) have access to different layers of the cloud infrastructure. +This opens up a large attack surface where workloads and data can be read, copied, or manipulated. With Constellation, Kubernetes deployments are shielded from the infrastructure and thus such accesses are prevented. + +## Infrastructure-based attacks + +Malicious cloud users ("hackers") may break out of their tenancy and access other tenants' data. Advanced attackers may even be able to establish a permanent foothold within the infrastructure and access data over a longer period. Analogously to the *insider access* scenario, Constellation also prevents access to a deployment's data in this scenario. + +## Supply chain attacks + +Supply chain security is receiving lots of attention recently due to an [increasing number of recorded attacks](https://www.enisa.europa.eu/news/enisa-news/understanding-the-increase-in-supply-chain-security-attacks). For instance, a malicious actor could attempt to tamper Constellation node images (including Kubernetes and other software) before they're loaded in the confidential VMs of a cluster. Constellation uses [remote attestation](../architecture/attestation.md) in conjunction with public [transparency logs](../workflows/verify-cli.md) to prevent this. + +In the future, Constellation will extend this feature to customer workloads. This will enable cluster owners to create auditable policies that precisely define which containers can run in a given deployment. 
diff --git a/docs/versioned_docs/version-2.11/reference/cli.md b/docs/versioned_docs/version-2.11/reference/cli.md new file mode 100644 index 000000000..2d9a8ebff --- /dev/null +++ b/docs/versioned_docs/version-2.11/reference/cli.md @@ -0,0 +1,806 @@ +# CLI reference + + + +Use the Constellation CLI to create and manage your clusters. + +Usage: + +``` +constellation [command] +``` +Commands: + +* [config](#constellation-config): Work with the Constellation configuration file + * [generate](#constellation-config-generate): Generate a default configuration file + * [fetch-measurements](#constellation-config-fetch-measurements): Fetch measurements for configured cloud provider and image + * [instance-types](#constellation-config-instance-types): Print the supported instance types for all cloud providers + * [kubernetes-versions](#constellation-config-kubernetes-versions): Print the Kubernetes versions supported by this CLI + * [migrate](#constellation-config-migrate): Migrate a configuration file to a new version +* [create](#constellation-create): Create instances on a cloud platform for your Constellation cluster +* [init](#constellation-init): Initialize the Constellation cluster +* [mini](#constellation-mini): Manage MiniConstellation clusters + * [up](#constellation-mini-up): Create and initialize a new MiniConstellation cluster + * [down](#constellation-mini-down): Destroy a MiniConstellation cluster +* [status](#constellation-status): Show status of a Constellation cluster +* [verify](#constellation-verify): Verify the confidential properties of a Constellation cluster +* [upgrade](#constellation-upgrade): Find and apply upgrades to your Constellation cluster + * [check](#constellation-upgrade-check): Check for possible upgrades + * [apply](#constellation-upgrade-apply): Apply an upgrade to a Constellation cluster +* [recover](#constellation-recover): Recover a completely stopped Constellation cluster +* [terminate](#constellation-terminate): Terminate a 
Constellation cluster +* [iam](#constellation-iam): Work with the IAM configuration on your cloud provider + * [create](#constellation-iam-create): Create IAM configuration on a cloud platform for your Constellation cluster + * [aws](#constellation-iam-create-aws): Create IAM configuration on AWS for your Constellation cluster + * [azure](#constellation-iam-create-azure): Create IAM configuration on Microsoft Azure for your Constellation cluster + * [gcp](#constellation-iam-create-gcp): Create IAM configuration on GCP for your Constellation cluster + * [destroy](#constellation-iam-destroy): Destroy an IAM configuration and delete local Terraform files + * [upgrade](#constellation-iam-upgrade): Find and apply upgrades to your IAM profile + * [apply](#constellation-iam-upgrade-apply): Apply an upgrade to an IAM profile +* [version](#constellation-version): Display version of this CLI + +## constellation config + +Work with the Constellation configuration file + +### Synopsis + +Work with the Constellation configuration file. + +### Options + +``` + -h, --help help for config +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation config generate + +Generate a default configuration file + +### Synopsis + +Generate a default configuration file for your selected cloud provider. + +``` +constellation config generate {aws|azure|gcp|openstack|qemu|stackit} [flags] +``` + +### Options + +``` + -a, --attestation string attestation variant to use {aws-sev-snp|aws-nitro-tpm|azure-sev-snp|azure-trustedlaunch|gcp-sev-es|qemu-vtpm}. 
If not specified, the default for the cloud provider is used + -h, --help help for generate + -k, --kubernetes string Kubernetes version to use in format MAJOR.MINOR (default "v1.27") +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation config fetch-measurements + +Fetch measurements for configured cloud provider and image + +### Synopsis + +Fetch measurements for configured cloud provider and image. + +A config needs to be generated first. + +``` +constellation config fetch-measurements [flags] +``` + +### Options + +``` + -h, --help help for fetch-measurements + -s, --signature-url string alternative URL to fetch measurements' signature from + -u, --url string alternative URL to fetch measurements from +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation config instance-types + +Print the supported instance types for all cloud providers + +### Synopsis + +Print the supported instance types for all cloud providers. 
+ +``` +constellation config instance-types [flags] +``` + +### Options + +``` + -h, --help help for instance-types +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation config kubernetes-versions + +Print the Kubernetes versions supported by this CLI + +### Synopsis + +Print the Kubernetes versions supported by this CLI. + +``` +constellation config kubernetes-versions [flags] +``` + +### Options + +``` + -h, --help help for kubernetes-versions +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation config migrate + +Migrate a configuration file to a new version + +### Synopsis + +Migrate a configuration file to a new version. + +``` +constellation config migrate [flags] +``` + +### Options + +``` + -h, --help help for migrate +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation create + +Create instances on a cloud platform for your Constellation cluster + +### Synopsis + +Create instances on a cloud platform for your Constellation cluster. 
+ +``` +constellation create [flags] +``` + +### Options + +``` + -h, --help help for create + -y, --yes create the cluster without further confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation init + +Initialize the Constellation cluster + +### Synopsis + +Initialize the Constellation cluster. + +Start your confidential Kubernetes. + +``` +constellation init [flags] +``` + +### Options + +``` + --conformance enable conformance mode + -h, --help help for init + --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config + --skip-helm-wait install helm charts without waiting for deployments to be ready +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation mini + +Manage MiniConstellation clusters + +### Synopsis + +Manage MiniConstellation clusters. + +### Options + +``` + -h, --help help for mini +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation mini up + +Create and initialize a new MiniConstellation cluster + +### Synopsis + +Create and initialize a new MiniConstellation cluster. + +A mini cluster consists of a single control-plane and worker node, hosted using QEMU/KVM. 
+ +``` +constellation mini up [flags] +``` + +### Options + +``` + -h, --help help for up + --merge-kubeconfig merge Constellation kubeconfig file with default kubeconfig file in $HOME/.kube/config (default true) +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation mini down + +Destroy a MiniConstellation cluster + +### Synopsis + +Destroy a MiniConstellation cluster. + +``` +constellation mini down [flags] +``` + +### Options + +``` + -h, --help help for down + -y, --yes terminate the cluster without further confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation status + +Show status of a Constellation cluster + +### Synopsis + +Show the status of a constellation cluster. + +Shows microservice, image, and Kubernetes versions installed in the cluster. Also shows status of current version upgrades. + +``` +constellation status [flags] +``` + +### Options + +``` + -h, --help help for status +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation verify + +Verify the confidential properties of a Constellation cluster + +### Synopsis + +Verify the confidential properties of a Constellation cluster. +If arguments aren't specified, values are read from `constellation-id.json`. 
+ +``` +constellation verify [flags] +``` + +### Options + +``` + --cluster-id string expected cluster identifier + -h, --help help for verify + -e, --node-endpoint string endpoint of the node to verify, passed as HOST[:PORT] + --raw print raw attestation document +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation upgrade + +Find and apply upgrades to your Constellation cluster + +### Synopsis + +Find and apply upgrades to your Constellation cluster. + +### Options + +``` + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation upgrade check + +Check for possible upgrades + +### Synopsis + +Check which upgrades can be applied to your Constellation Cluster. 
+ +``` +constellation upgrade check [flags] +``` + +### Options + +``` + -h, --help help for check + --ref string the reference to use for querying new versions (default "-") + --stream string the stream to use for querying new versions (default "stable") + -u, --update-config update the specified config file with the suggested versions +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation upgrade apply + +Apply an upgrade to a Constellation cluster + +### Synopsis + +Apply an upgrade to a Constellation cluster by applying the chosen configuration. + +``` +constellation upgrade apply [flags] +``` + +### Options + +``` + --conformance enable conformance mode + -h, --help help for apply + --skip-helm-wait install helm charts without waiting for deployments to be ready + --skip-phases strings comma-separated list of upgrade phases to skip + one or multiple of { infrastructure | helm | image | k8s } + -y, --yes run upgrades without further confirmation + WARNING: might delete your resources in case you are using cert-manager in your cluster. Please read the docs. + WARNING: might unintentionally overwrite measurements in the running cluster. +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation recover + +Recover a completely stopped Constellation cluster + +### Synopsis + +Recover a Constellation cluster by sending a recovery key to an instance in the boot stage. 
+ +This is only required if instances restart without other instances available for bootstrapping. + +``` +constellation recover [flags] +``` + +### Options + +``` + -e, --endpoint string endpoint of the instance, passed as HOST[:PORT] + -h, --help help for recover +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation terminate + +Terminate a Constellation cluster + +### Synopsis + +Terminate a Constellation cluster. + +The cluster can't be started again, and all persistent storage will be lost. + +``` +constellation terminate [flags] +``` + +### Options + +``` + -h, --help help for terminate + -y, --yes terminate the cluster without further confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation iam + +Work with the IAM configuration on your cloud provider + +### Synopsis + +Work with the IAM configuration on your cloud provider. + +### Options + +``` + -h, --help help for iam +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation iam create + +Create IAM configuration on a cloud platform for your Constellation cluster + +### Synopsis + +Create IAM configuration on a cloud platform for your Constellation cluster. 
+ +### Options + +``` + -h, --help help for create + --update-config update the config file with the specific IAM information + -y, --yes create the IAM configuration without further confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation iam create aws + +Create IAM configuration on AWS for your Constellation cluster + +### Synopsis + +Create IAM configuration on AWS for your Constellation cluster. + +``` +constellation iam create aws [flags] +``` + +### Options + +``` + -h, --help help for aws + --prefix string name prefix for all resources (required) + --zone string AWS availability zone the resources will be created in, e.g., us-east-2a (required) + See the Constellation docs for a list of currently supported regions. +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + --update-config update the config file with the specific IAM information + -C, --workspace string path to the Constellation workspace + -y, --yes create the IAM configuration without further confirmation +``` + +## constellation iam create azure + +Create IAM configuration on Microsoft Azure for your Constellation cluster + +### Synopsis + +Create IAM configuration on Microsoft Azure for your Constellation cluster. 
+ +``` +constellation iam create azure [flags] +``` + +### Options + +``` + -h, --help help for azure + --region string region the resources will be created in, e.g., westus (required) + --resourceGroup string name prefix of the two resource groups your cluster / IAM resources will be created in (required) + --servicePrincipal string name of the service principal that will be created (required) +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + --update-config update the config file with the specific IAM information + -C, --workspace string path to the Constellation workspace + -y, --yes create the IAM configuration without further confirmation +``` + +## constellation iam create gcp + +Create IAM configuration on GCP for your Constellation cluster + +### Synopsis + +Create IAM configuration on GCP for your Constellation cluster. + +``` +constellation iam create gcp [flags] +``` + +### Options + +``` + -h, --help help for gcp + --projectID string ID of the GCP project the configuration will be created in (required) + Find it on the welcome screen of your project: https://console.cloud.google.com/welcome + --serviceAccountID string ID for the service account that will be created (required) + Must be 6 to 30 lowercase letters, digits, or hyphens. 
+ --zone string GCP zone the cluster will be deployed in (required) + Find a list of available zones here: https://cloud.google.com/compute/docs/regions-zones#available +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + --update-config update the config file with the specific IAM information + -C, --workspace string path to the Constellation workspace + -y, --yes create the IAM configuration without further confirmation +``` + +## constellation iam destroy + +Destroy an IAM configuration and delete local Terraform files + +### Synopsis + +Destroy an IAM configuration and delete local Terraform files. + +``` +constellation iam destroy [flags] +``` + +### Options + +``` + -h, --help help for destroy + -y, --yes destroy the IAM configuration without asking for confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation iam upgrade + +Find and apply upgrades to your IAM profile + +### Synopsis + +Find and apply upgrades to your IAM profile. + +### Options + +``` + -h, --help help for upgrade +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation iam upgrade apply + +Apply an upgrade to an IAM profile + +### Synopsis + +Apply an upgrade to an IAM profile. 
+ +``` +constellation iam upgrade apply [flags] +``` + +### Options + +``` + -h, --help help for apply + -y, --yes run upgrades without further confirmation +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + +## constellation version + +Display version of this CLI + +### Synopsis + +Display version of this CLI. + +``` +constellation version [flags] +``` + +### Options + +``` + -h, --help help for version +``` + +### Options inherited from parent commands + +``` + --debug enable debug logging + --force disable version compatibility checks - might result in corrupted clusters + --tf-log string Terraform log level (default "NONE") + -C, --workspace string path to the Constellation workspace +``` + diff --git a/docs/versioned_docs/version-2.11/reference/migration.md b/docs/versioned_docs/version-2.11/reference/migration.md new file mode 100644 index 000000000..36680eef6 --- /dev/null +++ b/docs/versioned_docs/version-2.11/reference/migration.md @@ -0,0 +1,85 @@ +# Migrations + +This document describes breaking changes and migrations between Constellation releases. +Use [`constellation config migrate`](./cli.md#constellation-config-migrate) to automatically update an old config file to a new format. + +## Migrating from Azure's service principal authentication to managed identity authentication + +- The `provider.azure.appClientID` and `provider.azure.appClientSecret` fields are no longer supported and should be removed. +- To keep using an existing UAMI, add the `Owner` permission with the scope of your `resourceGroup`. +- Otherwise, simply [create new Constellation IAM credentials](../workflows/config.md#creating-an-iam-configuration) and use the created UAMI. 
+- To migrate the authentication for an existing cluster on Azure to an UAMI with the necessary permissions: + 1. Remove the `aadClientId` and `aadClientSecret` from the azureconfig secret. + 2. Set `useManagedIdentityExtension` to `true` and use the `userAssignedIdentity` from the Constellation config for the value of `userAssignedIdentityID`. + 3. Restart the CSI driver, cloud controller manager, cluster autoscaler, and Constellation operator pods. + + +## Migrating from CLI versions before 2.10 + +- AWS cluster upgrades require additional IAM permissions for the newly introduced `aws-load-balancer-controller`. Please upgrade your IAM roles using `iam upgrade apply`. This will show necessary changes and apply them, if desired. +- The global `nodeGroups` field was added. +- The fields `instanceType`, `stateDiskSizeGB`, and `stateDiskType` for each cloud provider are now part of the configuration of individual node groups. +- The `constellation create` command no longer uses the flags `--control-plane-count` and `--worker-count`. Instead, the initial node count is configured per node group in the `nodeGroups` field. + +## Migrating from CLI versions before 2.9 + +- The `provider.azure.appClientID` and `provider.azure.clientSecretValue` fields were removed to enforce migration to managed identity authentication + +## Migrating from CLI versions before 2.8 + +- The `measurements` field for each cloud service provider was replaced with a global `attestation` field. +- The `confidentialVM`, `idKeyDigest`, and `enforceIdKeyDigest` fields for the Azure cloud service provider were removed in favor of using the global `attestation` field. +- The optional global field `attestationVariant` was replaced by the now required `attestation` field. + +## Migrating from CLI versions before 2.3 + +- The `sshUsers` field was deprecated in v2.2 and has been removed from the configuration in v2.3. 
+ As an alternative for SSH, check the workflow section [Connect to nodes](../workflows/troubleshooting.md#node-shell-access). +- The `image` field for each cloud service provider has been replaced with a global `image` field. Use the following mapping to migrate your configuration: +
+ Show all + + | CSP | old image | new image | + | ----- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | + | AWS | `ami-06b8cbf4837a0a57c` | `v2.2.2` | + | AWS | `ami-02e96dc04a9e438cd` | `v2.2.2` | + | AWS | `ami-028ead928a9034b2f` | `v2.2.2` | + | AWS | `ami-032ac10dd8d8266e3` | `v2.2.1` | + | AWS | `ami-032e0d57cc4395088` | `v2.2.1` | + | AWS | `ami-053c3e49e19b96bdd` | `v2.2.1` | + | AWS | `ami-0e27ebcefc38f648b` | `v2.2.0` | + | AWS | `ami-098cd37f66523b7c3` | `v2.2.0` | + | AWS | `ami-04a87d302e2509aad` | `v2.2.0` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.2` | `v2.2.2` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.2` | `v2.2.2` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.1` | `v2.2.1` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.1` | `v2.2.1` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.2.0` | `v2.2.0` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.2.0` | `v2.2.0` | + | Azure | 
`/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.1.0` | `v2.1.0` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.1.0` | `v2.1.0` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation/images/constellation/versions/2.0.0` | `v2.0.0` | + | Azure | `/subscriptions/0d202bbb-4fa7-4af8-8125-58c269a05435/resourceGroups/constellation-images/providers/Microsoft.Compute/galleries/Constellation_CVM/images/constellation/versions/2.0.0` | `v2.0.0` | + | GCP | `projects/constellation-images/global/images/constellation-v2-2-2` | `v2.2.2` | + | GCP | `projects/constellation-images/global/images/constellation-v2-2-1` | `v2.2.1` | + | GCP | `projects/constellation-images/global/images/constellation-v2-2-0` | `v2.2.0` | + | GCP | `projects/constellation-images/global/images/constellation-v2-1-0` | `v2.1.0` | + | GCP | `projects/constellation-images/global/images/constellation-v2-0-0` | `v2.0.0` | +
+- The `enforcedMeasurements` field has been removed and merged with the `measurements` field. + - To migrate your config containing a new image (`v2.3` or greater), remove the old `measurements` and `enforcedMeasurements` entries from your config and run `constellation config fetch-measurements` + - To migrate your config containing an image older than `v2.3`, remove the `enforcedMeasurements` entry and replace the entries in `measurements` as shown in the example below: + + ```diff + measurements: + - 0: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= + + 0: + + expected: DzXCFGCNk8em5ornNZtKi+Wg6Z7qkQfs5CfE3qTkOc8= + + warnOnly: true + - 8: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= + + 8: + + expected: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= + + warnOnly: false + -enforcedMeasurements: + - - 8 + ``` diff --git a/docs/versioned_docs/version-2.11/reference/slsa.md b/docs/versioned_docs/version-2.11/reference/slsa.md new file mode 100644 index 000000000..267a75ed0 --- /dev/null +++ b/docs/versioned_docs/version-2.11/reference/slsa.md @@ -0,0 +1,73 @@ +# Supply chain levels for software artifacts (SLSA) adoption + +[Supply chain Levels for Software Artifacts, or SLSA (salsa)](https://slsa.dev/) is a framework for improving and grading a project's build system and engineering processes. SLSA focuses on security improvements for source code storage as well as build system definition, execution, and observation. SLSA is structured in [four levels](https://slsa.dev/spec/v0.1/levels). This page describes the adoption of SLSA for Constellation. + +:::info +SLSA is still in alpha status. The presented levels and their requirements might change in the future. We will adopt any changes into our engineering processes, as they get defined.
+::: + +## Level 1 - Adopted + +**[Build - Scripted](https://slsa.dev/spec/v0.1/requirements#scripted-build)** + +All build steps are automated via [CMake](https://github.com/edgelesssys/constellation/blob/main/CMakeLists.txt) and [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). + +**[Provenance - Available](https://slsa.dev/spec/v0.1/requirements#available)** + +Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). + +## Level 2 - Adopted + +**[Source - Version Controlled](https://slsa.dev/spec/v0.1/requirements#version-controlled)** + +Constellation is hosted on GitHub using git. + +**[Build - Build Service](https://slsa.dev/spec/v0.1/requirements#build-service)** + +All builds are carried out by [GitHub Actions](https://github.com/edgelesssys/constellation/tree/main/.github). + +**[Provenance - Authenticated](https://slsa.dev/spec/v0.1/requirements#authenticated)** + +Provenance for the CLI is signed using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator). Learn [how to verify the CLI](../workflows/verify-cli.md) using the signed provenance, before using it for the first time. + +**[Provenance - Service Generated](https://slsa.dev/spec/v0.1/requirements#service-generated)** + +Provenance for the CLI is generated using the [slsa-github-generator](https://github.com/slsa-framework/slsa-github-generator) in GitHub Actions. + +## Level 3 - Adopted + +**[Source - Verified History](https://slsa.dev/spec/v0.1/requirements#verified-history)** + +The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization [requires two-factor authentication](https://docs.github.com/en/organizations/keeping-your-organization-secure/managing-two-factor-authentication-for-your-organization/requiring-two-factor-authentication-in-your-organization) for all members. 
+ +**[Source - Retained Indefinitely](https://slsa.dev/spec/v0.1/requirements#retained-indefinitely)** + +Since we use GitHub to host the repository, an external person can't modify or delete the history. Before a pull request can be merged, an explicit approval from an [Edgeless Systems](https://github.com/edgelesssys) team member is required. + +The same holds true for changes proposed by team members. Each change to `main` needs to be proposed via a pull request and requires at least one approval. + +The [Edgeless Systems](https://github.com/edgelesssys) GitHub organization admins control these settings and are able to make changes to the repository's history should legal requirements necessitate it. These changes require two-party approval following the obliterate policy. + +**[Build - Build as Code](https://slsa.dev/spec/v0.1/requirements#build-as-code)** + +All build files for Constellation are stored in [the same repository](https://github.com/edgelesssys/constellation/tree/main/.github). + +**[Build - Ephemeral Environment](https://slsa.dev/spec/v0.1/requirements#ephemeral-environment)** + +All GitHub Action workflows are executed on [GitHub-hosted runners](https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners). These runners are only available during workflow execution. + +We currently don't use [self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners). + +**[Build - Isolated](https://slsa.dev/spec/v0.1/requirements#isolated)** + +As outlined in the previous section, we use GitHub-hosted runners, which provide a new, isolated and ephemeral environment for each build. + +Additionally, the [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator#generation-of-provenance) itself is run in an isolated workflow with the artifact hash as defined inputs.
+ +**[Provenance - Non-falsifiable](https://slsa.dev/spec/v0.1/requirements#non-falsifiable)** + +As outlined by [SLSA GitHub generator](https://github.com/slsa-framework/slsa-github-generator) it already fulfills the non-falsifiable requirements for SLSA Level 3. The generated provenance is signed using [sigstore](https://sigstore.dev/) with an OIDC-based proof of identity. + +## Level 4 - In Progress + +We strive to adopt certain aspects of SLSA Level 4 that support our engineering process. At the same time, SLSA is still in alpha status and the biggest changes to SLSA are expected to be around Level 4. diff --git a/docs/versioned_docs/version-2.11/reference/terraform.md b/docs/versioned_docs/version-2.11/reference/terraform.md new file mode 100644 index 000000000..9b4ef7908 --- /dev/null +++ b/docs/versioned_docs/version-2.11/reference/terraform.md @@ -0,0 +1,37 @@ +# Terraform usage + +[Terraform](https://www.terraform.io/) is an Infrastructure as Code (IaC) framework to manage cloud resources. This page explains how Constellation uses it internally and how advanced users may manually use it to have more control over the resource creation. + +:::info +Information on this page is intended for users who are familiar with Terraform. +It's not required for common usage of Constellation. +See the [Terraform documentation](https://developer.hashicorp.com/terraform/docs) if you want to learn more about it. +::: + +## Terraform state files + +Constellation keeps Terraform state files in subdirectories of the workspace together with the corresponding Terraform configuration files and metadata. +The subdirectories are created on the first Constellation CLI action that uses Terraform internally. 
+ +Currently, these subdirectories are: + +* `constellation-terraform` - Terraform state files for the resources of the Constellation cluster +* `constellation-iam-terraform` - Terraform state files for IAM configuration + +As with all commands, commands that work with these files (e.g., `create`, `terminate`, `iam`) have to be executed from the root of the cluster's [workspace directory](../architecture/orchestration.md#workspaces). You usually don't need and shouldn't manipulate or delete the subdirectories manually. + +## Interacting with Terraform manually + +Manual interaction with Terraform state created by Constellation (i.e., via the Terraform CLI) should only be performed by experienced users. It may lead to unrecoverable loss of cloud resources. For the majority of users and use cases, the interaction done by the [Constellation CLI](cli.md) is sufficient. + +## Terraform debugging + +To debug Terraform issues, the Constellation CLI offers the `tf-log` flag. You can set it to any of [Terraform's log levels](https://developer.hashicorp.com/terraform/internals/debugging): +- `JSON` (JSON-formatted logs at `TRACE` level) +- `TRACE` +- `DEBUG` +- `INFO` +- `WARN` +- `ERROR` + +The log output is written to the `terraform.log` file in the workspace directory. The output is appended to the file on each run. diff --git a/docs/versioned_docs/version-2.11/workflows/cert-manager.md b/docs/versioned_docs/version-2.11/workflows/cert-manager.md new file mode 100644 index 000000000..1d847e8bf --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/cert-manager.md @@ -0,0 +1,13 @@ +# Install cert-manager + +:::caution +If you want to use cert-manager with Constellation, pay attention to the following to avoid potential pitfalls. +::: + +Constellation ships with cert-manager preinstalled. +The default installation is part of the `kube-system` namespace, as all other Constellation-managed microservices. 
+You are free to install more instances of cert-manager into other namespaces. +However, be aware that any new installation needs to use the same version as the one installed with Constellation or rely on the same CRD versions. +Also remember to set the `installCRDs` value to `false` when installing new cert-manager instances. +It will create problems if you have two installations of cert-manager depending on different versions of the installed CRDs. +CRDs are cluster-wide resources and cert-manager depends on specific versions of those CRDs for each release. diff --git a/docs/versioned_docs/version-2.11/workflows/config.md b/docs/versioned_docs/version-2.11/workflows/config.md new file mode 100644 index 000000000..95f95aeec --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/config.md @@ -0,0 +1,315 @@ +# Configure your cluster + +:::info +This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. +::: + + + +--- + +Before you can create your cluster, you need to configure the identity and access management (IAM) for your cloud service provider (CSP) and choose machine types for the nodes. + +## Creating the configuration file + +You can generate a configuration file for your CSP by using the following CLI command: + + + + +```bash +constellation config generate azure +``` + + + + +```bash +constellation config generate gcp +``` + + + + +```bash +constellation config generate aws +``` + + + + +This creates the file `constellation-conf.yaml` in the current directory. + +## Choosing a VM type + +Constellation supports the following VM types: + + + +By default, Constellation uses `Standard_DC4as_v5` CVMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. 
For CVMs, any VM type with a minimum of 4 vCPUs from the [DCasv5 & DCadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/dcasv5-dcadsv5-series) or [ECasv5 & ECadsv5](https://docs.microsoft.com/en-us/azure/virtual-machines/ecasv5-ecadsv5-series) families is supported. + +You can also run `constellation config instance-types` to get the list of all supported options. + + + + +By default, Constellation uses `n2d-standard-4` VMs (4 vCPUs, 16 GB RAM) to create your cluster. Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. Supported are all machines with a minimum of 4 vCPUs from the [C2D](https://cloud.google.com/compute/docs/compute-optimized-machines#c2d_machine_types) or [N2D](https://cloud.google.com/compute/docs/general-purpose-machines#n2d_machines) family. You can run `constellation config instance-types` to get the list of all supported options. + + + + +By default, Constellation uses `m6a.xlarge` VMs (4 vCPUs, 16 GB RAM) to create your cluster. +Optionally, you can switch to a different VM type by modifying **instanceType** in the configuration file. +If you are using the default attestation variant `awsSEVSNP`, you can use the instance types described in [AWS's AMD SEV-SNP docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/snp-requirements.html). +Please mind the region restrictions mentioned in the [Getting started](../getting-started/first-steps.md#create-a-cluster) section. + +If you are using the attestation variant `awsNitroTPM`, you can choose any of the [nitroTPM-enabled instance types](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/enable-nitrotpm-prerequisites.html). + +The Constellation CLI can also print the supported instance types with: `constellation config instance-types`. + +:::caution +Due to a bug in AWS's SNP implementation, SNP report generation currently fails in unforeseeable circumstances. 
+Therefore, even if you select attestation variant `awsSEVSNP`, Constellation still uses NitroTPM-based attestation. +Nonetheless, runtime encryption is enabled. +AWS is currently investigating the issue. +SNP-based attestation will be enabled as soon as a fix is verified. +::: + + + + +Fill the desired VM type into the **instanceType** fields in the `constellation-conf.yml` file. + +## Creating additional node groups + +By default, Constellation creates the node groups `control_plane_default` and `worker_default` for control-plane nodes and workers, respectively. +If you require additional control-plane or worker groups with different instance types, zone placements, or disk sizes, you can add additional node groups to the `constellation-conf.yml` file. +Each node group can be scaled individually. + + +Consider the following example for AWS: + +```yaml +nodeGroups: + control_plane_default: + role: control-plane + instanceType: c6a.xlarge + stateDiskSizeGB: 30 + stateDiskType: gp3 + zone: eu-west-1c + initialCount: 3 + worker_default: + role: worker + instanceType: c6a.xlarge + stateDiskSizeGB: 30 + stateDiskType: gp3 + zone: eu-west-1c + initialCount: 2 + high_cpu: + role: worker + instanceType: c6a.24xlarge + stateDiskSizeGB: 128 + stateDiskType: gp3 + zone: eu-west-1c + initialCount: 1 +``` + +This configuration creates an additional node group `high_cpu` with a larger instance type and disk. + +You can use the field `zone` to specify what availability zone nodes of the group are placed in. +On Azure, this field is empty by default and nodes are automatically spread across availability zones. 
+Consult the documentation of your cloud provider for more information: + +- [AWS](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/) +- [Azure](https://azure.microsoft.com/en-us/explore/global-infrastructure/availability-zones) +- [GCP](https://cloud.google.com/compute/docs/regions-zones) + +## Choosing a Kubernetes version + +To learn which Kubernetes versions can be installed with your current CLI, you can run `constellation config kubernetes-versions`. +See also Constellation's [Kubernetes support policy](../architecture/versions.md#kubernetes-support-policy). + +## Creating an IAM configuration + +You can create an IAM configuration for your cluster automatically using the `constellation iam create` command. +If you already have a Constellation configuration file, you can add the `--update-config` flag to the command. This writes the needed IAM fields into your configuration. Furthermore, the flag updates the zone/region of the configuration if it hasn't been set yet. + + + + +You must be authenticated with the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). + +```bash +constellation iam create azure --region=westus --resourceGroup=constellTest --servicePrincipal=spTest +``` + +This command creates IAM configuration on the Azure region `westus` creating a new resource group `constellTest` and a new service principal `spTest`. + +Note that CVMs are currently only supported in a few regions, check [Azure's products available by region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). These are: + +* `westus` +* `eastus` +* `northeurope` +* `westeurope` +* `southeastasia` + +Paste the output into the corresponding fields of the `constellation-conf.yaml` file. 
+ + + + +You must be authenticated with the [GCP CLI](https://cloud.google.com/sdk/gcloud) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). + +```bash +constellation iam create gcp --projectID=yourproject-12345 --zone=europe-west2-a --serviceAccountID=constell-test +``` + +This command creates IAM configuration in the GCP project `yourproject-12345` on the GCP zone `europe-west2-a` creating a new service account `constell-test`. + +Note that only regions offering CVMs of the `C2D` or `N2D` series are supported. You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available), which you can filter by machine type `N2D`. + +Paste the output into the corresponding fields of the `constellation-conf.yaml` file. + + + + +You must be authenticated with the [AWS CLI](https://aws.amazon.com/en/cli/) in the shell session with a user that has the [required permissions for IAM creation](../getting-started/install.md#set-up-cloud-credentials). + +```bash +constellation iam create aws --zone=us-east-2a --prefix=constellTest +``` + +This command creates IAM configuration for the AWS zone `us-east-2a` using the prefix `constellTest` for all named resources being created. + +Constellation OS images are currently replicated to the following regions: + +* `eu-central-1` +* `eu-west-1` +* `eu-west-3` +* `us-east-2` +* `ap-south-1` + +If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). + +You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). + +Paste the output into the corresponding fields of the `constellation-conf.yaml` file. + + + + +
+Alternatively, you can manually create the IAM configuration on your CSP. + +The following describes the configuration fields and how you obtain the required information or create the required resources. + + + + +* **subscription**: The UUID of your Azure subscription, e.g., `8b8bd01f-efd9-4113-9bd1-c82137c32da7`. + + You can view your subscription UUID via `az account show` and read the `id` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-subscription). + +* **tenant**: The UUID of your Azure tenant, e.g., `3400e5a2-8fe2-492a-886c-38cb66170f25`. + + You can view your tenant UUID via `az account show` and read the `tenant` field. For more information refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/azure-portal/get-subscription-tenant-id#find-your-azure-ad-tenant). + +* **location**: The Azure datacenter location you want to deploy your cluster in, e.g., `westus`. CVMs are currently only supported in a few regions, check [Azure's products available by region](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=virtual-machines®ions=all). These are: + + * `westus` + * `eastus` + * `northeurope` + * `westeurope` + * `southeastasia` + +* **resourceGroup**: [Create a new resource group in Azure](https://learn.microsoft.com/azure/azure-resource-manager/management/manage-resource-groups-portal) for your Constellation cluster. Set this configuration field to the name of the created resource group. + +* **userAssignedIdentity**: [Create a new managed identity in Azure](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). You should create the identity in a different resource group as all resources within the cluster resource group will be deleted on cluster termination. 
+ + Add three role assignments to the identity: `Owner`, `Virtual Machine Contributor`, and `Application Insights Component Contributor`. The `scope` of all three should refer to the previously created cluster resource group. + + Set the configuration value to the full ID of the created identity, e.g., `/subscriptions/8b8bd01f-efd9-4113-9bd1-c82137c32da7/resourcegroups/constellation-identity/providers/Microsoft.ManagedIdentity/userAssignedIdentities/constellation-identity`. You can get it by opening the `JSON View` from the `Overview` section of the identity. + + The user-assigned identity is used by instances of the cluster to access other cloud resources. + For more information about managed identities refer to [Azure's documentation](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-manage-user-assigned-managed-identities). + + + + + +* **project**: The ID of your GCP project, e.g., `constellation-129857`. + + You can find it on the [welcome screen of your GCP project](https://console.cloud.google.com/welcome). For more information refer to [Google's documentation](https://support.google.com/googleapi/answer/7014113). + +* **region**: The GCP region you want to deploy your cluster in, e.g., `us-west1`. + + You can find a [list of all regions in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). + +* **zone**: The GCP zone you want to deploy your cluster in, e.g., `us-west1-a`. + + You can find a [list of all zones in Google's documentation](https://cloud.google.com/compute/docs/regions-zones#available). 
+ +* **serviceAccountKeyPath**: To configure this, you need to create a GCP [service account](https://cloud.google.com/iam/docs/service-accounts) with the following permissions: + + * `Compute Instance Admin (v1) (roles/compute.instanceAdmin.v1)` + * `Compute Network Admin (roles/compute.networkAdmin)` + * `Compute Security Admin (roles/compute.securityAdmin)` + * `Compute Storage Admin (roles/compute.storageAdmin)` + * `Service Account User (roles/iam.serviceAccountUser)` + + Afterward, create and download a new JSON key for this service account. Place the downloaded file in your Constellation workspace, and set the config parameter to the filename, e.g., `constellation-129857-15343dba46cb.json`. + + + + + +* **region**: The name of your chosen AWS data center region, e.g., `us-east-2`. + + Constellation OS images are currently replicated to the following regions: + * `eu-central-1` + * `eu-west-1` + * `eu-west-3` + * `us-east-2` + * `ap-south-1` + + If you require the OS image to be available in another region, [let us know](https://github.com/edgelesssys/constellation/issues/new?assignees=&labels=&template=feature_request.md&title=Support+new+AWS+image+region:+xx-xxxx-x). + + You can find a list of all [regions in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-available-regions). + +* **zone**: The name of your chosen AWS data center availability zone, e.g., `us-east-2a`. + + Learn more about [availability zones in AWS's documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones). + +* **iamProfileControlPlane**: The name of an IAM instance profile attached to all control-plane nodes. + + You can create the resource with [Terraform](https://www.terraform.io/). 
For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `control_plane_instance_profile`. + + Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.control_plane_policy`. + +* **iamProfileWorkerNodes**: The name of an IAM instance profile attached to all worker nodes. + + You can create the resource with [Terraform](https://www.terraform.io/). For that, use the [provided Terraform script](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam) to generate the necessary profile. The profile name will be provided as Terraform output value: `worker_nodes_instance_profile`. + + Alternatively, you can create the AWS profile with a tool of your choice. Use the JSON policy in [main.tf](https://github.com/edgelesssys/constellation/tree/release/v2.2/hack/terraform/aws/iam/main.tf) in the resource `aws_iam_policy.worker_node_policy`. + + + + +
+ +Now that you've configured your CSP, you can [create your cluster](./create.md). + +## Deleting an IAM configuration + +You can keep a created IAM configuration and reuse it for new clusters. Alternatively, you can also delete it if you don't want to use it anymore. + +Delete the IAM configuration by executing the following command in the same directory where you executed `constellation iam create` (the directory that contains [`constellation-iam-terraform`](../reference/terraform.md) as a subdirectory): + +```bash +constellation iam destroy +``` + +:::caution +For Azure, deleting the IAM configuration by executing `constellation iam destroy` will delete the whole resource group created by `constellation iam create`. +This also includes any additional resources in the resource group that weren't created by Constellation. +::: diff --git a/docs/versioned_docs/version-2.11/workflows/create.md b/docs/versioned_docs/version-2.11/workflows/create.md new file mode 100644 index 000000000..d65af1148 --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/create.md @@ -0,0 +1,99 @@ +# Create your cluster + +:::info +This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. +::: + + + +--- + +Creating your cluster requires two steps: + +1. Creating the necessary resources in your cloud environment +2. Bootstrapping the Constellation cluster and setting up a connection + +See the [architecture](../architecture/orchestration.md) section for details on the inner workings of this process. + +:::tip +If you don't have a cloud subscription, you can also set up a [local Constellation cluster using virtualization](../getting-started/first-steps-local.md) for testing. +::: + +## The *create* step + +This step creates the necessary resources for your cluster in your cloud environment. +Before you create the cluster, make sure to have a [valid configuration file](./config.md). 
+ +### Create + + + + +```bash +constellation create +``` + +*create* stores your cluster's state in a [`constellation-terraform`](../architecture/orchestration.md#cluster-creation-process) directory in your workspace. + + + + +Terraform allows for an easier GitOps integration as well as meeting regulatory requirements. +Since the Constellation CLI also uses Terraform under the hood, you can reuse the same Terraform files. + +:::info +Familiarize with the [Terraform usage policy](../reference/terraform.md) before manually interacting with Terraform to create a cluster. +Please also refrain from changing the Terraform resource definitions, as Constellation is tightly coupled to them. +::: + +Download the Terraform files for the selected CSP from the [GitHub repository](https://github.com/edgelesssys/constellation/tree/main/cli/internal/terraform/terraform). + +Create a `terraform.tfvars` file. +There, define all needed variables found in `variables.tf` using the values from the `constellation-config.yaml`. + +To find the image reference for your CSP and region, execute: + +```bash +CONSTELL_VER=vX.Y.Z +curl -s https://cdn.confidential.cloud/constellation/v1/ref/-/stream/stable/$CONSTELL_VER/image/info.json | jq +``` + +Initialize and apply Terraform to create the configured infrastructure: + +```bash +terraform init +terraform apply +``` + +The Constellation [init step](#the-init-step) requires the already created `constellation-config.yaml` and the `constellation-id.json`. 
+Create the `constellation-id.json` using the output from the Terraform state and the `constellation-conf.yaml`: + +```bash +CONSTELL_IP=$(terraform output ip) +CONSTELL_INIT_SECRET=$(terraform output initSecret | jq -r | tr -d '\n' | base64) +CONSTELL_CSP=$(cat constellation-conf.yaml | yq ".provider | keys | .[0]") +jq --null-input --arg cloudprovider "$CONSTELL_CSP" --arg ip "$CONSTELL_IP" --arg initsecret "$CONSTELL_INIT_SECRET" '{"cloudprovider":$cloudprovider,"ip":$ip,"initsecret":$initsecret}' > constellation-id.json +``` + + + + +## The *init* step + +The following command initializes and bootstraps your cluster: + +```bash +constellation init +``` + +Next, configure `kubectl` for your cluster: + +```bash +export KUBECONFIG="$PWD/constellation-admin.conf" +``` + +🏁 That's it. You've successfully created a Constellation cluster. + + +### Troubleshooting +In case `init` fails, the CLI collects logs from the bootstrapping instance and stores them inside `constellation-cluster.log`. diff --git a/docs/versioned_docs/version-2.11/workflows/lb.md b/docs/versioned_docs/version-2.11/workflows/lb.md new file mode 100644 index 000000000..b7189eecd --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/lb.md @@ -0,0 +1,16 @@ +# Expose a service +Constellation integrates the native load balancers of each CSP. Therefore, to expose a service simply [create a service of type `LoadBalancer`](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). + + + +## Internet-facing LB service on AWS + +To expose your application service externally you might want to use a Kubernetes Service of type `LoadBalancer`. On AWS, load-balancing is achieved through the [AWS Load Balancing Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller) as in the managed EKS. 
+ +Since recent versions, the controller deploys an internal LB by default, requiring you to set the annotation `service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing` to have an internet-facing LB. For more details, see the [official docs](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.2/guide/service/nlb/). + +For general information on LB with AWS see [Network load balancing on Amazon EKS](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html). + +:::caution +Before terminating the cluster, all LB-backed services should be deleted, so that the controller can clean up the related resources. +::: diff --git a/docs/versioned_docs/version-2.11/workflows/recovery.md b/docs/versioned_docs/version-2.11/workflows/recovery.md new file mode 100644 index 000000000..c26fb32eb --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/recovery.md @@ -0,0 +1,148 @@ +# Recover your cluster + +Recovery of a Constellation cluster means getting it back into a healthy state after too many concurrent node failures in the control plane. +Reasons for an unhealthy cluster can vary from a power outage, or planned reboot, to migration of nodes and regions. +Recovery events are rare, because Constellation is built for high availability and automatically and securely replaces failed nodes. When a node is replaced, Constellation's control plane first verifies the new node before it sends the node the cryptographic keys required to decrypt its [state disk](../architecture/images.md#state-disk). + +Constellation provides a recovery mechanism for cases where the control plane has failed and is unable to replace nodes. +The `constellation recover` command securely connects to all nodes in need of recovery using [attested TLS](../architecture/attestation.md#attested-tls-atls) and provides them with the keys to decrypt their state disks and continue booting. 
+ +## Identify unhealthy clusters + +The first step to recovery is identifying when a cluster becomes unhealthy. +Usually, this can be first observed when the Kubernetes API server becomes unresponsive. + +You can check the health status of the nodes via the cloud service provider (CSP). +Constellation provides logging information on the boot process and status via [cloud logging](troubleshooting.md#cloud-logging). +In the following, you'll find detailed descriptions for identifying clusters stuck in recovery for each CSP. + + + + +In the Azure portal, find the cluster's resource group. +Inside the resource group, open the control plane *Virtual machine scale set* `constellation-scale-set-controlplanes-`. +On the left, go to **Settings** > **Instances** and check that enough members are in a *Running* state. + +Second, check the boot logs of these *Instances*. +In the scale set's *Instances* view, open the details page of the desired instance. +On the left, go to **Support + troubleshooting** > **Serial console**. + +In the serial console output, search for `Waiting for decryption key`. +Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): + +```json +{"level":"INFO","ts":"2022-09-08T09:56:41Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"azure"} +{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} +{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} +{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} +``` + +The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
+If this fails due to an unhealthy control plane, you will see log messages similar to the following: + +```json +{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["10.9.0.5:30090","10.9.0.6:30090"]} +{"level":"INFO","ts":"2022-09-08T09:56:43Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.5:30090"} +{"level":"WARN","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.5:30090: i/o timeout\"","endpoint":"10.9.0.5:30090"} +{"level":"INFO","ts":"2022-09-08T09:57:03Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"10.9.0.6:30090"} +{"level":"WARN","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 10.9.0.6:30090: i/o timeout\"","endpoint":"10.9.0.6:30090"} +{"level":"ERROR","ts":"2022-09-08T09:57:23Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} +``` + +This means that you have to recover the node manually. + + + + +First, check that the control plane *Instance Group* has enough members in a *Ready* state. +In the GCP Console, go to **Instance Groups** and check the group for the cluster's control plane `-control-plane-`. + +Second, check the status of the *VM Instances*. +Go to **VM Instances** and open the details of the desired instance. 
+Check the serial console output of that instance by opening the **Logs** > **Serial port 1 (console)** page: + +![GCP portal serial console link](../_media/recovery-gcp-serial-console-link.png) + +In the serial console output, search for `Waiting for decryption key`. +Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} +``` + +The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. 
+If this fails due to an unhealthy control plane, you will see log messages similar to the following: + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} +{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} +{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} +{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} +``` + +This means that you have to recover the node manually. + + + + +First, open the AWS console to view all Auto Scaling Groups (ASGs) in the region of your cluster. Select the ASG of the control plane `--control-plane` and check that enough members are in a *Running* state. + +Second, check the boot logs of these *Instances*. In the ASG's *Instance management* view, select each desired instance. In the upper right corner, select **Action > Monitor and troubleshoot > Get system log**. 
+ +In the serial console output, search for `Waiting for decryption key`. +Similar output to the following means your node was restarted and needs to decrypt the [state disk](../architecture/images.md#state-disk): + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","caller":"cmd/main.go:55","msg":"Starting disk-mapper","version":"2.0.0","cloudProvider":"gcp"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"setupManager","caller":"setup/setup.go:72","msg":"Preparing existing state disk"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:65","msg":"Starting RejoinClient"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"recoveryServer","caller":"recoveryserver/server.go:59","msg":"Starting RecoveryServer"} +``` + +The node will then try to connect to the [*JoinService*](../architecture/microservices.md#joinservice) and obtain the decryption key. +If this fails due to an unhealthy control plane, you will see log messages similar to the following: + +```json +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:77","msg":"Received list with JoinService endpoints","endpoints":["192.168.178.4:30090","192.168.178.2:30090"]} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.4:30090"} +{"level":"WARN","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.4:30090: connect: connection refused\"","endpoint":"192.168.178.4:30090"} +{"level":"INFO","ts":"2022-09-08T10:21:53Z","logger":"rejoinClient","caller":"rejoinclient/client.go:96","msg":"Requesting rejoin ticket","endpoint":"192.168.178.2:30090"} 
+{"level":"WARN","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:101","msg":"Failed to rejoin on endpoint","error":"rpc error: code = Unavailable desc = connection error: desc = \"transport: Error while dialing dial tcp 192.168.178.2:30090: i/o timeout\"","endpoint":"192.168.178.2:30090"} +{"level":"ERROR","ts":"2022-09-08T10:22:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:110","msg":"Failed to rejoin on all endpoints"} +``` + +This means that you have to recover the node manually. + + + + +## Recover a cluster + +Recovering a cluster requires the following parameters: + +* The `constellation-id.json` file in your working directory or the cluster's load balancer IP address +* The master secret of the cluster + +A cluster can be recovered like this: + +```bash +$ constellation recover --master-secret constellation-mastersecret.json +Pushed recovery key. +Pushed recovery key. +Pushed recovery key. +Recovered 3 control-plane nodes. +``` + +In the serial console output of the node you'll see a similar output to the following: + +```json +{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:93","msg":"Received recover call"} +{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer","caller":"recoveryserver/server.go:125","msg":"Received state disk key and measurement secret, shutting down server"} +{"level":"INFO","ts":"2022-09-08T10:26:59Z","logger":"recoveryServer.gRPC","caller":"zap/server_interceptors.go:61","msg":"finished streaming call with code OK","grpc.start_time":"2022-09-08T10:26:59Z","system":"grpc","span.kind":"server","grpc.service":"recoverproto.API","grpc.method":"Recover","peer.address":"192.0.2.3:41752","grpc.code":"OK","grpc.time_ms":15.701} +{"level":"INFO","ts":"2022-09-08T10:27:13Z","logger":"rejoinClient","caller":"rejoinclient/client.go:87","msg":"RejoinClient stopped"} +``` diff --git 
a/docs/versioned_docs/version-2.11/workflows/sbom.md b/docs/versioned_docs/version-2.11/workflows/sbom.md new file mode 100644 index 000000000..9ef6eb65c --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/sbom.md @@ -0,0 +1,91 @@ +# Consume software bill of materials (SBOMs) + + + +--- + +Constellation builds produce a [software bill of materials (SBOM)](https://www.ntia.gov/SBOM) for each generated [artifact](../architecture/microservices.md). +You can use SBOMs to make informed decisions about dependencies and vulnerabilities in a given application. Enterprises rely on SBOMs to maintain an inventory of used applications, which allows them to take data-driven approaches to managing risks related to vulnerabilities. + +SBOMs for Constellation are generated using [Syft](https://github.com/anchore/syft), signed using [Cosign](https://github.com/sigstore/cosign), and stored with the produced artifact. + +:::note +The public key for Edgeless Systems' long-term code-signing key is: +``` +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT +JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== +-----END PUBLIC KEY----- +``` +The public key is also available for download at https://edgeless.systems/es.pub and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). + +Make sure the key is available in a file named `cosign.pub` to execute the following examples. +::: + +## Verify and download SBOMs + +The following sections detail how to work with each type of artifact to verify and extract the SBOM. + +### Constellation CLI + +The SBOM for Constellation CLI is made available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). The SBOM (`constellation.spdx.sbom`) and corresponding signature (`constellation.spdx.sbom.sig`) are valid for each Constellation CLI for a given version, regardless of architecture and operating system. 
+ +```bash +curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom +curl -LO https://github.com/edgelesssys/constellation/releases/download/v2.2.0/constellation.spdx.sbom.sig +cosign verify-blob --key cosign.pub --signature constellation.spdx.sbom.sig constellation.spdx.sbom +``` + +### Container Images + +SBOMs for container images are [attached to the image using Cosign](https://docs.sigstore.dev/signing/other_types#sboms-software-bill-of-materials) and uploaded to the same registry. + +As a consumer, use cosign to download and verify the SBOM: + +```bash +# Verify and download the attestation statement +cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.att.json +# Extract SBOM from attestation statement +jq -r .payload verification-service.att.json | base64 -d > verification-service.cyclonedx.sbom +``` + +A successful verification should result in similar output: + +```shell-session +$ cosign verify-attestation ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 --type 'https://cyclonedx.org/bom' --key cosign.pub --output-file verification-service.sbom + +Verification for ghcr.io/edgelesssys/constellation/verification-service@v2.2.0 -- +The following checks were performed on each of these signatures: + - The cosign claims were validated + - The signatures were verified against the specified public key +$ jq -r .payload verification-service.sbom | base64 -d > verification-service.cyclonedx.sbom +``` + +:::note + +This example considers only the `verification-service`. The same approach works for all containers in the [Constellation container registry](https://github.com/orgs/edgelesssys/packages?repo_name=constellation). + +::: + + + +## Vulnerability scanning + +You can use a plethora of tools to consume SBOMs. 
This section provides suggestions for tools that are popular and known to produce reliable results, but any tool that consumes [SPDX](https://spdx.dev/) or [CycloneDX](https://cyclonedx.org/) files should work. + +Syft is able to [convert between the two formats](https://github.com/anchore/syft#format-conversion-experimental) in case you require a specific type. + +### Grype + +[Grype](https://github.com/anchore/grype) is a CLI tool that lends itself well for integration into CI/CD systems or local developer machines. It's also able to consume the signed attestation statement directly and does the verification in one go. + +```bash +grype att:verification-service.sbom --key cosign.pub --add-cpes-if-none -q +``` + +### Dependency Track + +[Dependency Track](https://dependencytrack.org/) is one of the oldest and most mature solutions when it comes to managing software inventory and vulnerabilities. Once imported, it continuously scans SBOMs for new vulnerabilities. It supports the CycloneDX format and provides direct guidance on how to comply with [U.S. Executive Order 14028](https://docs.dependencytrack.org/usage/executive-order-14028/). diff --git a/docs/versioned_docs/version-2.11/workflows/scale.md b/docs/versioned_docs/version-2.11/workflows/scale.md new file mode 100644 index 000000000..06898ad0c --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/scale.md @@ -0,0 +1,111 @@ +# Scale your cluster + +Constellation provides all features of a Kubernetes cluster including scaling and autoscaling. + +## Worker node scaling + +### Autoscaling + +Constellation comes with autoscaling disabled by default. 
To enable autoscaling, find the scaling group of
+worker nodes:
+
+```bash
+kubectl get scalinggroups -o json | yq '.items | .[] | select(.spec.role == "Worker") | [{"name": .metadata.name, "nodeGroupName": .spec.nodeGroupName}]'
+```
+
+This will output a list of scaling groups with the corresponding cloud provider name (`name`) and the cloud provider agnostic name of the node group (`nodeGroupName`).
+
+Then, patch the `autoscaling` field of the scaling group resource with the desired `name` to `true`:
+
+```bash
+# Replace <scaling-group-name> with the name of the scaling group you want to enable autoscaling for
+worker_group=<scaling-group-name>
+kubectl patch scalinggroups $worker_group --patch '{"spec":{"autoscaling": true}}' --type='merge'
+kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P
+```
+
+The cluster autoscaler now automatically provisions additional worker nodes so that all pods have a place to run.
+You can configure the minimum and maximum number of worker nodes in the scaling group by patching the `min` or
+`max` fields of the scaling group resource:
+
+```bash
+kubectl patch scalinggroups $worker_group --patch '{"spec":{"max": 5}}' --type='merge'
+kubectl get scalinggroup $worker_group -o jsonpath='{.spec}' | yq -P
+```
+
+The cluster autoscaler will now never provision more than 5 worker nodes.
+
+If you want to see the autoscaling in action, try to add a deployment with a lot of replicas, like the
+following Nginx deployment. The number of replicas needed to trigger the autoscaling depends on the size
+and count of your worker nodes. Wait for the rollout of the deployment to finish and compare the number of
+worker nodes before and after the deployment:
+
+```bash
+kubectl create deployment nginx --image=nginx --replicas 150
+kubectl -n kube-system get nodes
+kubectl rollout status deployment nginx
+kubectl -n kube-system get nodes
+```
+
+### Manual scaling
+
+Alternatively, you can manually scale your cluster up or down:
+
+<tabs groupId="csp">
+<tabItem value="azure" label="Azure">
+
+1. 
Find your Constellation resource group. +2. Select the `scale-set-workers`. +3. Go to **settings** and **scaling**. +4. Set the new **instance count** and **save**. + + + + +1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). +2. **Edit** the **worker** instance group. +3. Set the new **number of instances** and **save**. + + + + +1. Go to Auto Scaling Groups and select the worker ASG to scale up. +2. Click **Edit** +3. Set the new (increased) **Desired capacity** and **Update**. + + + + +## Control-plane node scaling + +Control-plane nodes can **only be scaled manually and only scaled up**! + +To increase the number of control-plane nodes, follow these steps: + + + + + +1. Find your Constellation resource group. +2. Select the `scale-set-controlplanes`. +3. Go to **settings** and **scaling**. +4. Set the new (increased) **instance count** and **save**. + + + + +1. In Compute Engine go to [Instance Groups](https://console.cloud.google.com/compute/instanceGroups/). +2. **Edit** the **control-plane** instance group. +3. Set the new (increased) **number of instances** and **save**. + + + + +1. Go to Auto Scaling Groups and select the control-plane ASG to scale up. +2. Click **Edit** +3. Set the new (increased) **Desired capacity** and **Update**. + + + + +If you scale down the number of control-planes nodes, the removed nodes won't be able to exit the `etcd` cluster correctly. This will endanger the quorum that's required to run a stable Kubernetes control plane. diff --git a/docs/versioned_docs/version-2.11/workflows/storage.md b/docs/versioned_docs/version-2.11/workflows/storage.md new file mode 100644 index 000000000..9e3d96346 --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/storage.md @@ -0,0 +1,245 @@ +# Use persistent storage + +Persistent storage in Kubernetes requires cloud-specific configuration. 
+For abstraction of container storage, Kubernetes offers [volumes](https://kubernetes.io/docs/concepts/storage/volumes/), +allowing users to mount storage solutions directly into containers. +The [Container Storage Interface (CSI)](https://kubernetes-csi.github.io/docs/) is the standard interface for exposing arbitrary block and file storage systems into containers in Kubernetes. +Cloud service providers (CSPs) offer their own CSI-based solutions for cloud storage. + +## Confidential storage + +Most cloud storage solutions support encryption, such as [GCE Persistent Disks (PD)](https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek). +Constellation supports the available CSI-based storage options for Kubernetes engines in AWS, Azure, and GCP. +However, their encryption takes place in the storage backend and is managed by the CSP. +Thus, using the default CSI drivers for these storage types means trusting the CSP with your persistent data. + +To address this, Constellation provides CSI drivers for AWS EBS, Azure Disk, and GCE PD, offering [encryption on the node level](../architecture/keys.md#storage-encryption). They enable transparent encryption for persistent volumes without needing to trust the cloud backend. Plaintext data never leaves the confidential VM context, offering you confidential storage. + +For more details see [encrypted persistent storage](../architecture/encrypted-storage.md). + +## CSI drivers + +Constellation supports the following drivers, which offer node-level encryption and optional integrity protection. + + + + +**Constellation CSI driver for Azure Disk**: +Mount Azure [Disk Storage](https://azure.microsoft.com/en-us/services/storage/disks/#overview) into your Constellation cluster. +See the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-azuredisk-csi-driver) for more information. 
+Since Azure Disks are mounted as `ReadWriteOnce`, they're only available to a single pod. + + + + +**Constellation CSI driver for GCP Persistent Disk**: +Mount [Persistent Disk](https://cloud.google.com/persistent-disk) block storage into your Constellation cluster. +Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-gcp-compute-persistent-disk-csi-driver) for more information. + + + + +**Constellation CSI driver for AWS Elastic Block Store** +Mount [Elastic Block Store](https://aws.amazon.com/ebs/) storage volumes into your Constellation cluster. +Follow the instructions on how to [install the Constellation CSI driver](#installation) or check out the [repository](https://github.com/edgelesssys/constellation-aws-ebs-csi-driver) for more information. + + + + +Note that in case the options above aren't a suitable solution for you, Constellation is compatible with all other CSI-based storage options. For example, you can use [AWS EFS](https://docs.aws.amazon.com/en_en/eks/latest/userguide/efs-csi.html), [Azure Files](https://docs.microsoft.com/en-us/azure/storage/files/storage-files-introduction), or [GCP Filestore](https://cloud.google.com/filestore) with Constellation out of the box. Constellation is just not providing transparent encryption on the node level for these storage types yet. + +## Installation + +The Constellation CLI automatically installs Constellation's CSI driver for the selected CSP in your cluster. +If you don't need a CSI driver or wish to deploy your own, you can disable the automatic installation by setting `deployCSIDriver` to `false` in your Constellation config file. + + + + +Azure comes with two storage classes by default. 
+ +* `encrypted-rwo` + * Uses [Standard SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#standard-ssds) + * ext-4 filesystem + * Encryption of all data written to disk +* `integrity-encrypted-rwo` + * Uses [Premium SSDs](https://learn.microsoft.com/en-us/azure/virtual-machines/disks-types#premium-ssds) + * ext-4 filesystem + * Encryption of all data written to disk + * Integrity protection of data written to disk + +For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). + +:::info + +The default storage class is set to `encrypted-rwo` for performance reasons. +If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. + +Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. +Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. + +Note that volume expansion isn't supported for integrity-protected disks. + +::: + + + + +GCP comes with two storage classes by default. + +* `encrypted-rwo` + * Uses [standard persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) + * ext-4 filesystem + * Encryption of all data written to disk +* `integrity-encrypted-rwo` + * Uses [performance (SSD) persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) + * ext-4 filesystem + * Encryption of all data written to disk + * Integrity protection of data written to disk + +For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). + +:::info + +The default storage class is set to `encrypted-rwo` for performance reasons. 
+If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. + +Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. +Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. + +Note that volume expansion isn't supported for integrity-protected disks. + +::: + + + + +AWS comes with two storage classes by default. + +* `encrypted-rwo` + * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + * ext-4 filesystem + * Encryption of all data written to disk +* `integrity-encrypted-rwo` + * Uses [SSDs of `gp3` type](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-volume-types.html) + * ext-4 filesystem + * Encryption of all data written to disk + * Integrity protection of data written to disk + +For more information on encryption algorithms and key sizes, refer to [cryptographic algorithms](../architecture/encrypted-storage.md#cryptographic-algorithms). + +:::info + +The default storage class is set to `encrypted-rwo` for performance reasons. +If you want integrity-protected storage, set the `storageClassName` parameter of your persistent volume claim to `integrity-encrypted-rwo`. + +Alternatively, you can create your own storage class with integrity protection enabled by adding `csi.storage.k8s.io/fstype: ext4-integrity` to the class `parameters`. +Or use another filesystem by specifying another file system type with the suffix `-integrity`, e.g., `csi.storage.k8s.io/fstype: xfs-integrity`. + +Note that volume expansion isn't supported for integrity-protected disks. + +::: + + + + +1. 
Create a [persistent volume](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) + + A [persistent volume claim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) is a request for storage with certain properties. + It can refer to a storage class. + The following creates a persistent volume claim, requesting 20 GB of storage via the `encrypted-rwo` storage class: + + ```bash + cat < + +--- + +You can terminate your cluster using the CLI. For this, you need the Terraform state directory named [`constellation-terraform`](../reference/terraform.md) in the current directory. + +:::danger + +All ephemeral storage and state of your cluster will be lost. Make sure any data is safely stored in persistent storage. Constellation can recreate your cluster and the associated encryption keys, but won't backup your application data automatically. + +::: + + + +Terminate the cluster by running: + +```bash +constellation terminate +``` + +Or without confirmation (e.g., for automation purposes): + +```bash +constellation terminate --yes +``` + +This deletes all resources created by Constellation in your cloud environment. +All local files created by the `create` and `init` commands are deleted as well, except for `constellation-mastersecret.json` and the configuration file. + +:::caution + +Termination can fail if additional resources have been created that depend on the ones managed by Constellation. In this case, you need to delete these additional +resources manually. Just run the `terminate` command again afterward to continue the termination process of the cluster. + +::: + + + +Terminate the cluster by running: + +```bash +terraform destroy +``` + +Delete all files that are no longer needed: + +```bash +rm constellation-id.json constellation-admin.conf +``` + +Only the `constellation-mastersecret.json` and the configuration file remain. 
+ + + diff --git a/docs/versioned_docs/version-2.11/workflows/troubleshooting.md b/docs/versioned_docs/version-2.11/workflows/troubleshooting.md new file mode 100644 index 000000000..a3e25a0fe --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/troubleshooting.md @@ -0,0 +1,168 @@ +# Troubleshooting + +This section aids you in finding problems when working with Constellation. + +## Common issues + +### Issues with creating new clusters + +When you create a new cluster, you should always use the [latest release](https://github.com/edgelesssys/constellation/releases/latest). +If something doesn't work, check out the [known issues](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22). + +### Azure: Resource Providers can't be registered + +On Azure, you may receive the following error when running `create` or `terminate` with limited IAM permissions: + +```shell-session +Error: Error ensuring Resource Providers are registered. + +Terraform automatically attempts to register the Resource Providers it supports to +ensure it's able to provision resources. + +If you don't have permission to register Resource Providers you may wish to use the +"skip_provider_registration" flag in the Provider block to disable this functionality. + +[...] +``` + +To continue, please ensure that the [required resource providers](../getting-started/install.md#required-permissions) have been registered in your subscription by your administrator. + +Afterward, set `ARM_SKIP_PROVIDER_REGISTRATION=true` as an environment variable and either run `create` or `terminate` again. 
+For example: + +```bash +ARM_SKIP_PROVIDER_REGISTRATION=true constellation create +``` + +Or alternatively, for `terminate`: + +```bash +ARM_SKIP_PROVIDER_REGISTRATION=true constellation terminate +``` + +### Nodes fail to join with error `untrusted measurement value` + +This error indicates that a node's [attestation statement](../architecture/attestation.md) contains measurements that don't match the trusted values expected by the [JoinService](../architecture/microservices.md#joinservice). +This may for example happen if the cloud provider updates the VM's firmware such that it influences the [runtime measurements](../architecture/attestation.md#runtime-measurements) in an unforeseen way. +A failed upgrade due to an erroneous attestation config can also cause this error. +You can change the expected measurements to resolve the failure. + +:::caution + +Attestation and trusted measurements are crucial for the security of your cluster. +Be extra careful when manually changing these settings. +When in doubt, check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). + +::: + +:::tip + +During an upgrade with modified attestation config, a backup of the current configuration is stored in the `join-config` config map in the `kube-system` namespace under the `attestationConfig_backup` key. To restore the old attestation config after a failed upgrade, replace the value of `attestationConfig` with the value from `attestationConfig_backup`: + +```bash +kubectl patch configmaps -n kube-system join-config -p "{\"data\":{\"attestationConfig\":\"$(kubectl get configmaps -n kube-system join-config -o "jsonpath={.data.attestationConfig_backup}")\"}}" +``` + +::: + +You can use the `upgrade apply` command to change measurements of a running cluster: + +1. 
Modify the `measurements` key in your local `constellation-conf.yaml` to the expected values. +2. Run `constellation upgrade apply`. + +Keep in mind that running `upgrade apply` also applies any version changes from your config to the cluster. + +You can run these commands to learn about the versions currently configured in the cluster: + +- Kubernetes API server version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.kubernetesClusterVersion` +- image version: `kubectl get nodeversion constellation-version -o json -n kube-system | jq .spec.imageVersion` +- microservices versions: `helm list --filter 'constellation-services' -n kube-system` + +### Upgrading Kubernetes resources fails + +Constellation manages its Kubernetes resources using Helm. +When applying an upgrade, the charts that are about to be installed, and a values override file `overrides.yaml`, +are saved to disk in your current workspace under `constellation-upgrade/upgrade-/helm-charts/`. +If upgrading the charts using the Constellation CLI fails, you can review these charts and try to manually apply the upgrade. + +:::caution + +Changing and manually applying the charts may destroy cluster resources and can lead to broken Constellation deployments. +Proceed with caution and when in doubt, +check if the encountered [issue is known](https://github.com/edgelesssys/constellation/issues?q=is%3Aopen+is%3Aissue+label%3A%22known+issue%22) or [contact support](https://github.com/edgelesssys/constellation#support). + +::: + +## Diagnosing issues + +### Cloud logging + +To provide information during early stages of a node's boot process, Constellation logs messages to the log systems of the cloud providers. Since these offerings **aren't** confidential, only generic information without any sensitive values is stored. This provides administrators with a high-level understanding of the current state of a node. + +You can view this information in the following places: + + + + +1. 
In your Azure subscription find the Constellation resource group.
+2. Inside the resource group find the Application Insights resource called `constellation-insights-*`.
+3. On the left-hand side go to `Logs`, which is located in the section `Monitoring`.
+   - Close the Queries page if it pops up.
+4. In the query text field type in `traces`, and click `Run`.
+
+To **find the disk UUIDs** use the following query: `traces | where message contains "Disk UUID"`
+
+</tabItem>
+<tabItem value="gcp" label="GCP">
+
+1. Select the project that hosts Constellation.
+2. Go to the `Compute Engine` service.
+3. On the right-hand side of a VM entry select `More Actions` (a stacked ellipsis)
+   - Select `View logs`
+
+To **find the disk UUIDs** use the following query: `resource.type="gce_instance" text_payload=~"Disk UUID:.*\n" logName=~".*/constellation-boot-log"`
+
+:::info
+
+Constellation uses the default bucket to store logs. Its [default retention period is 30 days](https://cloud.google.com/logging/quotas#logs_retention_periods).
+
+:::
+
+</tabItem>
+<tabItem value="aws" label="AWS">
+
+1. Open [AWS CloudWatch](https://console.aws.amazon.com/cloudwatch/home)
+2. Select [Log Groups](https://console.aws.amazon.com/cloudwatch/home#logsV2:log-groups)
+3. Select the log group that matches the name of your cluster.
+4. Select the log stream for control or worker type nodes.
+
+</tabItem>
+</tabs>
+
+### Node shell access
+
+Debugging via a shell on a node is [directly supported by Kubernetes](https://kubernetes.io/docs/tasks/debug/debug-application/debug-running-pod/#node-shell-session).
+
+1. Figure out which node to connect to:
+
+   ```bash
+   kubectl get nodes
+   # or to see more information, such as IPs:
+   kubectl get nodes -o wide
+   ```
+
+2. Connect to the node:
+
+   ```bash
+   kubectl debug node/constell-worker-xksa0-000000 -it --image=busybox
+   ```
+
+   You will be presented with a prompt.
+
+   The node's file system is mounted at `/host`.
+
+3. 
Once finished, clean up the debug pod: + + ```bash + kubectl delete pod node-debugger-constell-worker-xksa0-000000-bjthj + ``` diff --git a/docs/versioned_docs/version-2.11/workflows/trusted-launch.md b/docs/versioned_docs/version-2.11/workflows/trusted-launch.md new file mode 100644 index 000000000..13bd63ba6 --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/trusted-launch.md @@ -0,0 +1,53 @@ +# Use Azure trusted launch VMs + +Constellation also supports [trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/trusted-launch) on Microsoft Azure. Trusted launch VMs don't offer the same level of security as Confidential VMs, but are available in more regions and in larger quantities. The main difference between trusted launch VMs and normal VMs is that the former offer vTPM-based remote attestation. When used with trusted launch VMs, Constellation relies on vTPM-based remote attestation to verify nodes. + +:::caution + +Trusted launch VMs don't provide runtime encryption and don't keep the cloud service provider (CSP) out of your trusted computing base. + +::: + +Constellation supports trusted launch VMs with instance types `Standard_D*_v4` and `Standard_E*_v4`. Run `constellation config instance-types` for a list of all supported instance types. + +## VM images + +Azure currently doesn't support [community galleries for trusted launch VMs](https://docs.microsoft.com/en-us/azure/virtual-machines/share-gallery-community). Thus, you need to manually import the Constellation node image into your cloud subscription. + +The latest image is available at . Simply adjust the version number to download a newer version. + +After you've downloaded the image, create a resource group `constellation-images` in your Azure subscription and import the image. 
+You can use a script to do this: + +```bash +wget https://raw.githubusercontent.com/edgelesssys/constellation/main/hack/importAzure.sh +chmod +x importAzure.sh +AZURE_IMAGE_VERSION=2.2.0 AZURE_RESOURCE_GROUP_NAME=constellation-images AZURE_IMAGE_FILE=./constellation.img ./importAzure.sh +``` + +The script creates the following resources: +1. A new image gallery with the default name `constellation-import` +2. A new image definition with the default name `constellation` +3. The actual image with the provided version. In this case `2.2.0` + +Once the import is completed, use the `ID` of the image version in your `constellation-conf.yaml` for the `image` field. Set `confidentialVM` to `false`. + +Fetch the image measurements: + +```bash +IMAGE_VERSION=2.2.0 +URL=https://public-edgeless-constellation.s3.us-east-2.amazonaws.com//communitygalleries/constellationcvm-b3782fa0-0df7-4f2f-963e-fc7fc42663df/images/constellation/versions/$IMAGE_VERSION/measurements.yaml +constellation config fetch-measurements -u$URL -s$URL.sig +``` + +:::info + +The [constellation create](create.md) command will issue a warning because manually imported images aren't recognized as production grade images: + +```shell-session +Configured image doesn't look like a released production image. Double check image before deploying to production. +``` + +Please ignore this warning. + +::: diff --git a/docs/versioned_docs/version-2.11/workflows/upgrade.md b/docs/versioned_docs/version-2.11/workflows/upgrade.md new file mode 100644 index 000000000..e0a33734a --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/upgrade.md @@ -0,0 +1,110 @@ +# Upgrade your cluster + +Constellation provides an easy way to upgrade all components of your cluster, without disrupting its availability. +Specifically, you can upgrade the Kubernetes version, the nodes' image, and the Constellation microservices. 
+You configure the desired versions in your local Constellation configuration and trigger upgrades with the `upgrade apply` command. +To learn about available versions you use the `upgrade check` command. +Which versions are available depends on the CLI version you are using. + +## Update the CLI + +Each CLI comes with a set of supported microservice and Kubernetes versions. +Most importantly, a given CLI version can only upgrade a cluster of the previous minor version, but not older ones. +This means that you have to upgrade your CLI and cluster one minor version at a time. + +For example, if you are currently on CLI version v2.6 and the latest version is v2.8, you should + +* upgrade the CLI to v2.7, +* upgrade the cluster to v2.7, +* and only then continue upgrading the CLI (and the cluster) to v2.8 after. + +Also note that if your current Kubernetes version isn't supported by the next CLI version, use your current CLI to upgrade to a newer Kubernetes version first. + +To learn which Kubernetes versions are supported by a particular CLI, run [constellation config kubernetes-versions](../reference/cli.md#constellation-config-kubernetes-versions). + +## Migrate the configuration + +The Constellation configuration file is located in the file `constellation-conf.yaml` in your workspace. +Refer to the [migration reference](../reference/migration.md) to check if you need to update fields in your configuration file. +Use [`constellation config migrate`](../reference/cli.md#constellation-config-migrate) to automatically update an old config file to a new format. 
+ +## Check for upgrades + +To learn which versions the current CLI can upgrade to and what's installed in your cluster, run: + +```bash +# Show possible upgrades +constellation upgrade check + +# Show possible upgrades and write them to config file +constellation upgrade check --update-config +``` + +You can either enter the reported target versions into your config manually or run the above command with the `--update-config` flag. +When using this flag, the `kubernetesVersion`, `image`, `microserviceVersion`, and `attestation` fields are overwritten with the smallest available upgrade. + +## Apply the upgrade + +Once you updated your config with the desired versions, you can trigger the upgrade with this command: + +```bash +constellation upgrade apply +``` + +Microservice upgrades will be finished within a few minutes, depending on the cluster size. +If you are interested, you can monitor pods restarting in the `kube-system` namespace with your tool of choice. + +Image and Kubernetes upgrades take longer. +For each node in your cluster, a new node has to be created and joined. +The process usually takes up to ten minutes per node. + +When applying an upgrade, the Helm charts for the upgrade as well as backup files of Constellation-managed Custom Resource Definitions, Custom Resources, and Terraform state are created. +You can use the Terraform state backup to restore previous resources in case an upgrade misconfigured or erroneously deleted a resource. +You can use the Custom Resource (Definition) backup files to restore Custom Resources and Definitions manually (e.g., via `kubectl apply`) if the automatic migration of those resources fails. +You can use the Helm charts to manually apply upgrades to the Kubernetes resources, should an upgrade fail. + +:::note + +For advanced users: the upgrade consists of several phases that can be individually skipped through the `--skip-phases` flag. 
+The phases are `infrastructure` for the cloud resource management through Terraform, `helm` for the chart management of the microservices, `image` for OS image upgrades, and `k8s` for Kubernetes version upgrades. + +::: + +## Check the status + +Upgrades are asynchronous operations. +After you run `upgrade apply`, it will take a while until the upgrade has completed. +To understand if an upgrade is finished, you can run: + +```bash +constellation status +``` + +This command displays the following information: + +* The installed services and their versions +* The image and Kubernetes version the cluster is expecting on each node +* How many nodes are up to date + +Here's an example output: + +```shell-session +Target versions: + Image: v2.6.0 + Kubernetes: v1.25.8 +Service versions: + Cilium: v1.12.1 + cert-manager: v1.10.0 + constellation-operators: v2.6.0 + constellation-services: v2.6.0 +Cluster status: Some node versions are out of date + Image: 23/25 + Kubernetes: 25/25 +``` + +This output indicates that the cluster is running Kubernetes version `1.25.8`, and all nodes have the appropriate binaries installed. +23 out of 25 nodes have already upgraded to the targeted image version of `2.6.0`, while two are still in progress. + +## Apply further upgrades + +After the upgrade is finished, you can run `constellation upgrade check` again to see if there are more upgrades available. If so, repeat the process. diff --git a/docs/versioned_docs/version-2.11/workflows/verify-cli.md b/docs/versioned_docs/version-2.11/workflows/verify-cli.md new file mode 100644 index 000000000..1280c51b0 --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/verify-cli.md @@ -0,0 +1,125 @@ +# Verify the CLI + +:::info +This recording presents the essence of this page. It's recommended to read it in full for the motivation and all details. 
+::: + + + +--- + +Edgeless Systems uses [sigstore](https://www.sigstore.dev/) and [SLSA](https://slsa.dev) to ensure supply-chain security for the Constellation CLI and node images ("artifacts"). sigstore consists of three components: [Cosign](https://docs.sigstore.dev/signing/quickstart), [Rekor](https://docs.sigstore.dev/logging/overview), and Fulcio. Edgeless Systems uses Cosign to sign artifacts. All signatures are uploaded to the public Rekor transparency log, which resides at . + +:::note +The public key for Edgeless Systems' long-term code-signing key is: + +``` +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEf8F1hpmwE+YCFXzjGtaQcrL6XZVT +JmEe5iSLvG1SyQSAew7WdMKF6o9t8e2TFuCkzlOhhlws2OHWbiFZnFWCFw== +-----END PUBLIC KEY----- +``` + +The public key is also available for download at and in the Twitter profile [@EdgelessSystems](https://twitter.com/EdgelessSystems). +::: + +The Rekor transparency log is a public append-only ledger that verifies and records signatures and associated metadata. The Rekor transparency log enables everyone to observe the sequence of (software) signatures issued by Edgeless Systems and many other parties. The transparency log allows for the public identification of dubious or malicious signatures. + +You should always ensure that (1) your CLI executable was signed with the private key corresponding to the above public key and that (2) there is a corresponding entry in the Rekor transparency log. Both can be done as described in the following. + +:::info +You don't need to verify the Constellation node images. This is done automatically by your CLI and the rest of Constellation. +::: + +## Verify the signature + +First, [install the Cosign CLI](https://docs.sigstore.dev/system_config/installation). 
Next, [download](https://github.com/edgelesssys/constellation/releases) and verify the signature that accompanies your CLI executable, for example: + +```shell-session +$ cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 + +Verified OK +``` + +The above performs an offline verification of the provided public key, signature, and executable. To also verify that a corresponding entry exists in the public Rekor transparency log, add the variable `COSIGN_EXPERIMENTAL=1`: + +```shell-session +$ COSIGN_EXPERIMENTAL=1 cosign verify-blob --key https://edgeless.systems/es.pub --signature constellation-linux-amd64.sig constellation-linux-amd64 + +tlog entry verified with uuid: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 index: 3477047 +Verified OK +``` + +🏁 You now know that your CLI executable was officially released and signed by Edgeless Systems. + +### Optional: Manually inspect the transparency log + +To further inspect the public Rekor transparency log, [install the Rekor CLI](https://docs.sigstore.dev/logging/installation). A search for the CLI executable should give a single UUID. (Note that this UUID contains the UUID from the previous `cosign` command.) 
+ +```shell-session +$ rekor-cli search --artifact constellation-linux-amd64 + +Found matching entries (listed by UUID): +362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 +``` + +With this UUID you can get the full entry from the transparency log: + +```shell-session +$ rekor-cli get --uuid=362f8ecba72f4326afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 + +LogID: c0d23d6ad406973f9559f3ba2d1ca01f84147d8ffc5b8445c224f98b9591801d +Index: 3477047 +IntegratedTime: 2022-09-12T22:28:16Z +UUID: afaba7f6635b3e058888692841848e5514357315be9528474b23f5dcccb82b13 +Body: { + "HashedRekordObj": { + "data": { + "hash": { + "algorithm": "sha256", + "value": "40e137b9b9b8204d672642fd1e181c6d5ccb50cfc5cc7fcbb06a8c2c78f44aff" + } + }, + "signature": { + "content": "MEUCIQCSER3mGj+j5Pr2kOXTlCIHQC3gT30I7qkLr9Awt6eUUQIgcLUKRIlY50UN8JGwVeNgkBZyYD8HMxwC/LFRWoMn180=", + "publicKey": { + "content": "LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFZjhGMWhwbXdFK1lDRlh6akd0YVFjckw2WFpWVApKbUVlNWlTTHZHMVN5UVNBZXc3V2RNS0Y2bzl0OGUyVEZ1Q2t6bE9oaGx3czJPSFdiaUZabkZXQ0Z3PT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0tCg==" + } + } + } +} +``` + +The field `publicKey` should contain Edgeless Systems' public key in Base64 encoding. + +You can get an exhaustive list of artifact signatures issued by Edgeless Systems via the following command: + +```bash +rekor-cli search --public-key https://edgeless.systems/es.pub --pki-format x509 +``` + +Edgeless Systems monitors this list to detect potential unauthorized use of its private key. + +## Verify the provenance + +Provenance attests that a software artifact was produced by a specific repository and build system invocation. For more information on provenance visit [slsa.dev](https://slsa.dev/provenance/v0.2) and learn about the [adoption of SLSA for Constellation](../reference/slsa.md). 
+ +Just as checking its signature proves that the CLI hasn't been manipulated, checking the provenance proves that the artifact was produced by the expected build process and hasn't been tampered with. + +To verify the provenance, first install the [slsa-verifier](https://github.com/slsa-framework/slsa-verifier). Then make sure you have the provenance file (`constellation.intoto.jsonl`) and Constellation CLI downloaded. Both are available on the [GitHub release page](https://github.com/edgelesssys/constellation/releases). + +:::info +The same provenance file is valid for all Constellation CLI executables of a given version independent of the target platform. +::: + +Use the verifier to perform the check: + +```shell-session +$ slsa-verifier verify-artifact constellation-linux-amd64 \ + --provenance-path constellation.intoto.jsonl \ + --source-uri github.com/edgelesssys/constellation + +Verified signature against tlog entry index 7771317 at URL: https://rekor.sigstore.dev/api/v1/log/entries/24296fb24b8ad77af2c04c8b4ae0d5bc5... +Verified build using builder https://github.com/slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@refs/tags/v1.2.2 at commit 18e9924b416323c37b9cdfd6cc728de8a947424a +PASSED: Verified SLSA provenance +``` diff --git a/docs/versioned_docs/version-2.11/workflows/verify-cluster.md b/docs/versioned_docs/version-2.11/workflows/verify-cluster.md new file mode 100644 index 000000000..e97e02e37 --- /dev/null +++ b/docs/versioned_docs/version-2.11/workflows/verify-cluster.md @@ -0,0 +1,96 @@ +# Verify your cluster + +Constellation's [attestation feature](../architecture/attestation.md) allows you, or a third party, to verify the integrity and confidentiality of your Constellation cluster. + +## Fetch measurements + +To verify the integrity of Constellation you need trusted measurements to verify against. 
For each node image released by Edgeless Systems, there are signed measurements, which you can download using the CLI: + +```bash +constellation config fetch-measurements +``` + +This command performs the following steps: + +1. Download the signed measurements for the configured image. By default, this will use Edgeless Systems' public measurement registry. +2. Verify the signature of the measurements. This will use Edgeless Systems' [public key](https://edgeless.systems/es.pub). +3. Write measurements into configuration file. + +The configuration file then contains a list of `measurements` similar to the following: + +```yaml +# ... +measurements: + 0: + expected: "0f35c214608d93c7a6e68ae7359b4a8be5a0e99eea9107ece427c4dea4e439cf" + warnOnly: false + 4: + expected: "02c7a67c01ec70ffaf23d73a12f749ab150a8ac6dc529bda2fe1096a98bf42ea" + warnOnly: false + 5: + expected: "e6949026b72e5045706cd1318889b3874480f7a3f7c5c590912391a2d15e6975" + warnOnly: true + 8: + expected: "0000000000000000000000000000000000000000000000000000000000000000" + warnOnly: false + 9: + expected: "f0a6e8601b00e2fdc57195686cd4ef45eb43a556ac1209b8e25d993213d68384" + warnOnly: false + 11: + expected: "0000000000000000000000000000000000000000000000000000000000000000" + warnOnly: false + 12: + expected: "da99eb6cf7c7fbb692067c87fd5ca0b7117dc293578e4fea41f95d3d3d6af5e2" + warnOnly: false + 13: + expected: "0000000000000000000000000000000000000000000000000000000000000000" + warnOnly: false + 14: + expected: "d7c4cc7ff7933022f013e03bdee875b91720b5b86cf1753cad830f95e791926f" + warnOnly: true + 15: + expected: "0000000000000000000000000000000000000000000000000000000000000000" + warnOnly: false +# ... +``` + +Each entry specifies the expected value of the Constellation node, and whether the measurement should be enforced (`warnOnly: false`), or only a warning should be logged (`warnOnly: true`). 
+By default, the subset of the [available measurements](../architecture/attestation.md#runtime-measurements) that can be locally reproduced and verified is enforced. + +During attestation, the validating side (CLI or [join service](../architecture/microservices.md#joinservice)) compares each measurement reported by the issuing side (first node or joining node) individually. +For mismatching measurements that have set `warnOnly` to `true` only a warning is emitted. +For mismatching measurements that have set `warnOnly` to `false` an error is emitted and attestation fails. +If attestation fails for a new node, it isn't permitted to join the cluster. + +## The *verify* command + +:::note +The steps below are purely optional. They're automatically executed by `constellation init` when you initialize your cluster. The `constellation verify` command mostly has an illustrative purpose. +::: + +The `verify` command obtains and verifies an attestation statement from a running Constellation cluster. + +```bash +constellation verify [--cluster-id ...] +``` + +From the attestation statement, the command verifies the following properties: + +* The cluster is using the correct Confidential VM (CVM) type. +* Inside the CVMs, the correct node images are running. The node images are identified through the measurements obtained in the previous step. +* The unique ID of the cluster matches the one from your `constellation-id.json` file or passed in via `--cluster-id`. + +Once the above properties are verified, you know that you are talking to the right Constellation cluster and it's in a good and trustworthy shape. + +### Custom arguments + +The `verify` command also allows you to verify any Constellation deployment that you have network access to. For this you need the following: + +* The IP address of a running Constellation cluster's [VerificationService](../architecture/microservices.md#verificationservice). 
The `VerificationService` is exposed via a `NodePort` service using the external IP address of your cluster. Run `kubectl get nodes -o wide` and look for `EXTERNAL-IP`. +* The cluster's *clusterID*. See [cluster identity](../architecture/keys.md#cluster-identity) for more details. + +For example: + +```shell-session +constellation verify -e 192.0.2.1 --cluster-id Q29uc3RlbGxhdGlvbkRvY3VtZW50YXRpb25TZWNyZXQ= +``` diff --git a/docs/versioned_sidebars/version-2.11-sidebars.json b/docs/versioned_sidebars/version-2.11-sidebars.json new file mode 100644 index 000000000..02898994d --- /dev/null +++ b/docs/versioned_sidebars/version-2.11-sidebars.json @@ -0,0 +1,269 @@ +{ + "docs": [ + { + "type": "doc", + "label": "Introduction", + "id": "intro" + }, + { + "type": "category", + "label": "Basics", + "link": { + "type": "generated-index" + }, + "items": [ + { + "type": "doc", + "label": "Confidential Kubernetes", + "id": "overview/confidential-kubernetes" + }, + { + "type": "doc", + "label": "Security benefits", + "id": "overview/security-benefits" + }, + { + "type": "doc", + "label": "Product features", + "id": "overview/product" + }, + { + "type": "doc", + "label": "Feature status of clouds", + "id": "overview/clouds" + }, + { + "type": "category", + "label": "Performance", + "link": { + "type": "doc", + "id": "overview/performance/performance" + }, + "items": [ + { + "type": "doc", + "label": "I/O benchmarks", + "id": "overview/performance/io" + }, + { + "type": "doc", + "label": "Application benchmarks", + "id": "overview/performance/application" + } + ] + }, + { + "type": "doc", + "label": "License", + "id": "overview/license" + } + ] + }, + { + "type": "category", + "label": "Getting started", + "link": { + "type": "generated-index" + }, + "items": [ + { + "type": "doc", + "label": "Installation", + "id": "getting-started/install" + }, + { + "type": "doc", + "label": "First steps (cloud)", + "id": "getting-started/first-steps" + }, + { + "type": "doc", + "label": 
"First steps (local)", + "id": "getting-started/first-steps-local" + }, + { + "type": "category", + "label": "Examples", + "link": { + "type": "doc", + "id": "getting-started/examples" + }, + "items": [ + { + "type": "doc", + "label": "Emojivoto", + "id": "getting-started/examples/emojivoto" + }, + { + "type": "doc", + "label": "Online Boutique", + "id": "getting-started/examples/online-boutique" + }, + { + "type": "doc", + "label": "Horizontal Pod Autoscaling", + "id": "getting-started/examples/horizontal-scaling" + } + ] + } + ] + }, + { + "type": "category", + "label": "Workflows", + "link": { + "type": "generated-index" + }, + "items": [ + { + "type": "doc", + "label": "Verify the CLI", + "id": "workflows/verify-cli" + }, + { + "type": "doc", + "label": "Configure your cluster", + "id": "workflows/config" + }, + { + "type": "doc", + "label": "Create your cluster", + "id": "workflows/create" + }, + { + "type": "doc", + "label": "Scale your cluster", + "id": "workflows/scale" + }, + { + "type": "doc", + "label": "Upgrade your cluster", + "id": "workflows/upgrade" + }, + { + "type": "doc", + "label": "Expose a service", + "id": "workflows/lb" + }, + { + "type": "doc", + "label": "Install cert-manager", + "id": "workflows/cert-manager" + }, + { + "type": "doc", + "label": "Terminate your cluster", + "id": "workflows/terminate" + }, + { + "type": "doc", + "label": "Recover your cluster", + "id": "workflows/recovery" + }, + { + "type": "doc", + "label": "Verify your cluster", + "id": "workflows/verify-cluster" + }, + { + "type": "doc", + "label": "Use persistent storage", + "id": "workflows/storage" + }, + { + "type": "doc", + "label": "Consume SBOMs", + "id": "workflows/sbom" + }, + { + "type": "doc", + "label": "Troubleshooting", + "id": "workflows/troubleshooting" + } + ] + }, + { + "type": "category", + "label": "Architecture", + "link": { + "type": "generated-index" + }, + "items": [ + { + "type": "doc", + "label": "Overview", + "id": "architecture/overview" + 
}, + { + "type": "doc", + "label": "Cluster orchestration", + "id": "architecture/orchestration" + }, + { + "type": "doc", + "label": "Versions and support", + "id": "architecture/versions" + }, + { + "type": "doc", + "label": "Microservices", + "id": "architecture/microservices" + }, + { + "type": "doc", + "label": "Attestation", + "id": "architecture/attestation" + }, + { + "type": "doc", + "label": "Images", + "id": "architecture/images" + }, + { + "type": "doc", + "label": "Keys and cryptographic primitives", + "id": "architecture/keys" + }, + { + "type": "doc", + "label": "Encrypted persistent storage", + "id": "architecture/encrypted-storage" + }, + { + "type": "doc", + "label": "Networking", + "id": "architecture/networking" + } + ] + }, + { + "type": "category", + "label": "Reference", + "link": { + "type": "generated-index" + }, + "items": [ + { + "type": "doc", + "label": "CLI", + "id": "reference/cli" + }, + { + "type": "doc", + "label": "Configuration migrations", + "id": "reference/migration" + }, + { + "type": "doc", + "label": "Terraform usage", + "id": "reference/terraform" + }, + { + "type": "doc", + "label": "SLSA adoption", + "id": "reference/slsa" + } + ] + } + ] +} diff --git a/docs/versions.json b/docs/versions.json index 47070b080..2ee67e480 100644 --- a/docs/versions.json +++ b/docs/versions.json @@ -1,4 +1,5 @@ [ + "2.11", "2.10", "2.9", "2.8",