mirror of
https://github.com/edgelesssys/constellation.git
synced 2024-12-24 06:59:40 -05:00
Implement activation service
Signed-off-by: Daniel Weiße <dw@edgeless.systems>
This commit is contained in:
parent
0941ce8c7e
commit
b461c40c3a
@ -16,6 +16,8 @@
|
||||
admin.conf
|
||||
coordinator-*
|
||||
|
||||
go.work
|
||||
go.work.sum
|
||||
/image
|
||||
|
||||
# Dockerfiles
|
||||
|
30
activation/Dockerfile
Normal file
30
activation/Dockerfile
Normal file
@ -0,0 +1,30 @@
|
||||
# Build stage: Fedora base pinned by digest for reproducible builds.
FROM fedora@sha256:36af84ba69e21c9ef86a0424a090674c433b2b80c2462e57503886f1d823abe8 AS build

# Base tooling needed to fetch Go and build the service.
RUN dnf -y update && \
    dnf install -y iproute iputils wget git && \
    dnf clean all

# Install Go
ARG GO_VER=1.18
RUN wget https://go.dev/dl/go${GO_VER}.linux-amd64.tar.gz && \
    tar -C /usr/local -xzf go${GO_VER}.linux-amd64.tar.gz && \
    rm go${GO_VER}.linux-amd64.tar.gz
# key=value form; the whitespace form of ENV is legacy/deprecated.
ENV PATH="${PATH}:/usr/local/go/bin"

# Download go dependencies first so this layer is cached across source changes.
WORKDIR /constellation/
COPY go.mod ./
COPY go.sum ./
RUN go mod download all

# Copy Repo
COPY . /constellation
RUN rm -rf ./hack/

WORKDIR /constellation/activation
ARG PROJECT_VERSION=v0.0.0
# Static, reproducible binary: no CGO, trimmed paths, stripped symbols,
# version stamped into main.versionInfo via the linker.
RUN CGO_ENABLED=0 go build -o activation-service -trimpath -buildvcs=false -ldflags "-s -w -buildid='' -X main.versionInfo=${PROJECT_VERSION}" ./cmd/

# Release stage: scratch image containing only the static binary.
FROM scratch AS release
COPY --from=build /constellation/activation/activation-service /activation
ENTRYPOINT [ "/activation" ]
|
71
activation/README.md
Normal file
71
activation/README.md
Normal file
@ -0,0 +1,71 @@
|
||||
# Activation
|
||||
|
||||
Implementation for Constellation's node activation flow.
|
||||
|
||||
The activation service runs on each control-plane node of the Kubernetes cluster.
|
||||
New nodes (at cluster start, or later through autoscaling) send an activation request to the service over [aTLS](../coordinator/atls/).
|
||||
The activation service verifies the new node's certificate and attestation statement.
|
||||
If attestation is successful, the new node is supplied with a disk encryption key for its state disk, and a Kubernetes bootstrap token, so it may join the cluster.
|
||||
|
||||
The activation service uses klog v2 for logging.
|
||||
Use the `-v` flag to set the log verbosity level.
|
||||
Use different verbosity levels during development depending on the information:
|
||||
|
||||
* 2 for information that should always be logged. Examples: server starting, new gRPC request.
|
||||
|
||||
* 4 for general logging. If you are unsure what log level to use, use 4.
|
||||
|
||||
* 6 for low-level information logging. Example: values of new expected measurements.
|
||||
|
||||
* Potentially sensitive information, such as return values of functions should never be logged.
|
||||
|
||||
## Packages
|
||||
|
||||
### [activationproto](./activationproto/)
|
||||
|
||||
Proto definitions for the activation service.
|
||||
|
||||
### [server](./server/)
|
||||
|
||||
The `server` implements gRPC endpoints for joining the cluster and holds the main application logic.
|
||||
|
||||
Connections between the activation service and joining nodes are secured using [aTLS](../internal/atls/README.md).
|
||||
|
||||
Worker nodes call the `ActivateNode` endpoint.
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant New Node
|
||||
participant Activation Service
|
||||
New Node-->>Activation Service: aTLS Handshake (server side verification)
|
||||
Activation Service-->>New Node:
|
||||
New Node->>+Activation Service: grpc::ActivateNode(DiskUUID)
|
||||
Activation Service->>+KMS: grpc::GetDataKey(DiskUUID)
|
||||
KMS->>-Activation Service: DiskEncryptionKey
|
||||
Activation Service->>-New Node: [DiskEncryptionKey, KubernetesJoinToken]
|
||||
```
|
||||
|
||||
Control-plane nodes call the `ActivateCoordinator` endpoint.
|
||||
|
||||
### [kms](./kms/)
|
||||
|
||||
Implements interaction with Constellation's key management service.
|
||||
This is needed for fetching data encryption keys for joining nodes.
|
||||
|
||||
### [kubeadm](./kubeadm/)
|
||||
|
||||
Implements interaction with the Kubernetes API to create join tokens for new nodes.
|
||||
|
||||
### [validator](./validator/)
|
||||
|
||||
A wrapper for the more generic `atls.Validator`, allowing for updates to the underlying validator without having to restart the service.
|
||||
|
||||
### [watcher](./watcher/)
|
||||
|
||||
Uses fsnotify to wait for expected measurement updates, and updates the validator if any occur.
|
||||
|
||||
## [Dockerfile](./Dockerfile)
|
||||
|
||||
```shell
|
||||
DOCKER_BUILDKIT=1 docker build --build-arg PROJECT_VERSION="v1.0.0" -t ghcr.io/edgelesssys/activation-service:v1.0.0 -f activation/Dockerfile .
|
||||
```
|
383
activation/activationproto/activation.pb.go
Normal file
383
activation/activationproto/activation.pb.go
Normal file
@ -0,0 +1,383 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.0
|
||||
// protoc v3.20.1
|
||||
// source: activation.proto
|
||||
|
||||
package activationproto
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type ActivateNodeRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
DiskUuid string `protobuf:"bytes,1,opt,name=disk_uuid,json=diskUuid,proto3" json:"disk_uuid,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActivateNodeRequest) Reset() {
|
||||
*x = ActivateNodeRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_activation_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActivateNodeRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActivateNodeRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ActivateNodeRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_activation_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActivateNodeRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ActivateNodeRequest) Descriptor() ([]byte, []int) {
|
||||
return file_activation_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *ActivateNodeRequest) GetDiskUuid() string {
|
||||
if x != nil {
|
||||
return x.DiskUuid
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ActivateNodeResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
StateDiskKey []byte `protobuf:"bytes,1,opt,name=state_disk_key,json=stateDiskKey,proto3" json:"state_disk_key,omitempty"`
|
||||
OwnerId []byte `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"`
|
||||
ClusterId []byte `protobuf:"bytes,3,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
|
||||
ApiServerEndpoint string `protobuf:"bytes,4,opt,name=api_server_endpoint,json=apiServerEndpoint,proto3" json:"api_server_endpoint,omitempty"`
|
||||
Token string `protobuf:"bytes,5,opt,name=token,proto3" json:"token,omitempty"`
|
||||
DiscoveryTokenCaCertHash string `protobuf:"bytes,6,opt,name=discovery_token_ca_cert_hash,json=discoveryTokenCaCertHash,proto3" json:"discovery_token_ca_cert_hash,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) Reset() {
|
||||
*x = ActivateNodeResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_activation_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActivateNodeResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ActivateNodeResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_activation_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActivateNodeResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ActivateNodeResponse) Descriptor() ([]byte, []int) {
|
||||
return file_activation_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetStateDiskKey() []byte {
|
||||
if x != nil {
|
||||
return x.StateDiskKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetOwnerId() []byte {
|
||||
if x != nil {
|
||||
return x.OwnerId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetClusterId() []byte {
|
||||
if x != nil {
|
||||
return x.ClusterId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetApiServerEndpoint() string {
|
||||
if x != nil {
|
||||
return x.ApiServerEndpoint
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetToken() string {
|
||||
if x != nil {
|
||||
return x.Token
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *ActivateNodeResponse) GetDiscoveryTokenCaCertHash() string {
|
||||
if x != nil {
|
||||
return x.DiscoveryTokenCaCertHash
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ActivateCoordinatorRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ActivateCoordinatorRequest) Reset() {
|
||||
*x = ActivateCoordinatorRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_activation_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActivateCoordinatorRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActivateCoordinatorRequest) ProtoMessage() {}
|
||||
|
||||
func (x *ActivateCoordinatorRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_activation_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActivateCoordinatorRequest.ProtoReflect.Descriptor instead.
|
||||
func (*ActivateCoordinatorRequest) Descriptor() ([]byte, []int) {
|
||||
return file_activation_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
type ActivateCoordinatorResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *ActivateCoordinatorResponse) Reset() {
|
||||
*x = ActivateCoordinatorResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_activation_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ActivateCoordinatorResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ActivateCoordinatorResponse) ProtoMessage() {}
|
||||
|
||||
func (x *ActivateCoordinatorResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_activation_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ActivateCoordinatorResponse.ProtoReflect.Descriptor instead.
|
||||
func (*ActivateCoordinatorResponse) Descriptor() ([]byte, []int) {
|
||||
return file_activation_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
var File_activation_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_activation_proto_rawDesc = []byte{
|
||||
0x0a, 0x10, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x06, 0x70, 0x75, 0x62, 0x61, 0x70, 0x69, 0x22, 0x32, 0x0a, 0x13, 0x41, 0x63,
|
||||
0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||
0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x69, 0x73, 0x6b, 0x55, 0x75, 0x69, 0x64, 0x22, 0xfc,
|
||||
0x01, 0x0a, 0x14, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x74, 0x65,
|
||||
0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x4b, 0x65, 0x79, 0x12, 0x19, 0x0a,
|
||||
0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73,
|
||||
0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6c,
|
||||
0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2e, 0x0a, 0x13, 0x61, 0x70, 0x69, 0x5f, 0x73,
|
||||
0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x45,
|
||||
0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
|
||||
0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3e, 0x0a,
|
||||
0x1c, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
|
||||
0x5f, 0x63, 0x61, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x06, 0x20,
|
||||
0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x54, 0x6f,
|
||||
0x6b, 0x65, 0x6e, 0x43, 0x61, 0x43, 0x65, 0x72, 0x74, 0x48, 0x61, 0x73, 0x68, 0x22, 0x1c, 0x0a,
|
||||
0x1a, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e,
|
||||
0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x1d, 0x0a, 0x1b, 0x41,
|
||||
0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74,
|
||||
0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb0, 0x01, 0x0a, 0x03, 0x41,
|
||||
0x50, 0x49, 0x12, 0x49, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x4e, 0x6f,
|
||||
0x64, 0x65, 0x12, 0x1b, 0x2e, 0x70, 0x75, 0x62, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69,
|
||||
0x76, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
|
||||
0x1c, 0x2e, 0x70, 0x75, 0x62, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74,
|
||||
0x65, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a,
|
||||
0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e,
|
||||
0x61, 0x74, 0x6f, 0x72, 0x12, 0x22, 0x2e, 0x70, 0x75, 0x62, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63,
|
||||
0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x74, 0x6f,
|
||||
0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x70, 0x75, 0x62, 0x61, 0x70,
|
||||
0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6f, 0x72, 0x64, 0x69,
|
||||
0x6e, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x48, 0x5a,
|
||||
0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x64, 0x67, 0x65,
|
||||
0x6c, 0x65, 0x73, 0x73, 0x73, 0x79, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x65, 0x6c, 0x6c,
|
||||
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||
0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x69,
|
||||
0x6f, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_activation_proto_rawDescOnce sync.Once
|
||||
file_activation_proto_rawDescData = file_activation_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_activation_proto_rawDescGZIP() []byte {
|
||||
file_activation_proto_rawDescOnce.Do(func() {
|
||||
file_activation_proto_rawDescData = protoimpl.X.CompressGZIP(file_activation_proto_rawDescData)
|
||||
})
|
||||
return file_activation_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_activation_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
|
||||
var file_activation_proto_goTypes = []interface{}{
|
||||
(*ActivateNodeRequest)(nil), // 0: pubapi.ActivateNodeRequest
|
||||
(*ActivateNodeResponse)(nil), // 1: pubapi.ActivateNodeResponse
|
||||
(*ActivateCoordinatorRequest)(nil), // 2: pubapi.ActivateCoordinatorRequest
|
||||
(*ActivateCoordinatorResponse)(nil), // 3: pubapi.ActivateCoordinatorResponse
|
||||
}
|
||||
var file_activation_proto_depIdxs = []int32{
|
||||
0, // 0: pubapi.API.ActivateNode:input_type -> pubapi.ActivateNodeRequest
|
||||
2, // 1: pubapi.API.ActivateCoordinator:input_type -> pubapi.ActivateCoordinatorRequest
|
||||
1, // 2: pubapi.API.ActivateNode:output_type -> pubapi.ActivateNodeResponse
|
||||
3, // 3: pubapi.API.ActivateCoordinator:output_type -> pubapi.ActivateCoordinatorResponse
|
||||
2, // [2:4] is the sub-list for method output_type
|
||||
0, // [0:2] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_activation_proto_init() }
|
||||
func file_activation_proto_init() {
|
||||
if File_activation_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_activation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActivateNodeRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_activation_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActivateNodeResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_activation_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActivateCoordinatorRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_activation_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ActivateCoordinatorResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_activation_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 4,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_activation_proto_goTypes,
|
||||
DependencyIndexes: file_activation_proto_depIdxs,
|
||||
MessageInfos: file_activation_proto_msgTypes,
|
||||
}.Build()
|
||||
File_activation_proto = out.File
|
||||
file_activation_proto_rawDesc = nil
|
||||
file_activation_proto_goTypes = nil
|
||||
file_activation_proto_depIdxs = nil
|
||||
}
|
31
activation/activationproto/activation.proto
Normal file
31
activation/activationproto/activation.proto
Normal file
@ -0,0 +1,31 @@
|
||||
syntax = "proto3";

package pubapi;

option go_package = "github.com/edgelesssys/constellation/activation/server/activationproto";

// API is the public endpoint of the activation service, served to nodes
// joining the cluster over aTLS.
service API {
    // ActivateNode is called by a joining worker node to receive its state
    // disk key and Kubernetes join credentials.
    rpc ActivateNode(ActivateNodeRequest) returns (ActivateNodeResponse);
    // ActivateCoordinator is called by a joining control-plane node.
    rpc ActivateCoordinator(ActivateCoordinatorRequest) returns (ActivateCoordinatorResponse);
}


// ActivateNodeRequest identifies the joining node.
message ActivateNodeRequest {
    // UUID of the node's state disk, used to look up its disk encryption key.
    string disk_uuid = 1;
}

// ActivateNodeResponse carries everything a verified node needs to join.
message ActivateNodeResponse {
    // Disk encryption key for the node's state disk.
    bytes state_disk_key = 1;
    bytes owner_id = 2;
    bytes cluster_id = 3;
    // Endpoint of the Kubernetes API server to join against.
    string api_server_endpoint = 4;
    // Kubernetes bootstrap token.
    string token = 5;
    string discovery_token_ca_cert_hash = 6;
}


// ActivateCoordinatorRequest is currently empty; fields may be added later.
message ActivateCoordinatorRequest {
}

// ActivateCoordinatorResponse is currently empty; fields may be added later.
message ActivateCoordinatorResponse {
}
|
141
activation/activationproto/activation_grpc.pb.go
Normal file
141
activation/activationproto/activation_grpc.pb.go
Normal file
@ -0,0 +1,141 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.2.0
|
||||
// - protoc v3.20.1
|
||||
// source: activation.proto
|
||||
|
||||
package activationproto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// APIClient is the client API for API service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type APIClient interface {
|
||||
ActivateNode(ctx context.Context, in *ActivateNodeRequest, opts ...grpc.CallOption) (*ActivateNodeResponse, error)
|
||||
ActivateCoordinator(ctx context.Context, in *ActivateCoordinatorRequest, opts ...grpc.CallOption) (*ActivateCoordinatorResponse, error)
|
||||
}
|
||||
|
||||
type aPIClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewAPIClient(cc grpc.ClientConnInterface) APIClient {
|
||||
return &aPIClient{cc}
|
||||
}
|
||||
|
||||
func (c *aPIClient) ActivateNode(ctx context.Context, in *ActivateNodeRequest, opts ...grpc.CallOption) (*ActivateNodeResponse, error) {
|
||||
out := new(ActivateNodeResponse)
|
||||
err := c.cc.Invoke(ctx, "/pubapi.API/ActivateNode", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *aPIClient) ActivateCoordinator(ctx context.Context, in *ActivateCoordinatorRequest, opts ...grpc.CallOption) (*ActivateCoordinatorResponse, error) {
|
||||
out := new(ActivateCoordinatorResponse)
|
||||
err := c.cc.Invoke(ctx, "/pubapi.API/ActivateCoordinator", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// APIServer is the server API for API service.
|
||||
// All implementations must embed UnimplementedAPIServer
|
||||
// for forward compatibility
|
||||
type APIServer interface {
|
||||
ActivateNode(context.Context, *ActivateNodeRequest) (*ActivateNodeResponse, error)
|
||||
ActivateCoordinator(context.Context, *ActivateCoordinatorRequest) (*ActivateCoordinatorResponse, error)
|
||||
mustEmbedUnimplementedAPIServer()
|
||||
}
|
||||
|
||||
// UnimplementedAPIServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedAPIServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedAPIServer) ActivateNode(context.Context, *ActivateNodeRequest) (*ActivateNodeResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ActivateNode not implemented")
|
||||
}
|
||||
func (UnimplementedAPIServer) ActivateCoordinator(context.Context, *ActivateCoordinatorRequest) (*ActivateCoordinatorResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ActivateCoordinator not implemented")
|
||||
}
|
||||
func (UnimplementedAPIServer) mustEmbedUnimplementedAPIServer() {}
|
||||
|
||||
// UnsafeAPIServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to APIServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeAPIServer interface {
|
||||
mustEmbedUnimplementedAPIServer()
|
||||
}
|
||||
|
||||
func RegisterAPIServer(s grpc.ServiceRegistrar, srv APIServer) {
|
||||
s.RegisterService(&API_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _API_ActivateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ActivateNodeRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).ActivateNode(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/ActivateNode",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).ActivateNode(ctx, req.(*ActivateNodeRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _API_ActivateCoordinator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ActivateCoordinatorRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(APIServer).ActivateCoordinator(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/pubapi.API/ActivateCoordinator",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(APIServer).ActivateCoordinator(ctx, req.(*ActivateCoordinatorRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// API_ServiceDesc is the grpc.ServiceDesc for API service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var API_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "pubapi.API",
|
||||
HandlerType: (*APIServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "ActivateNode",
|
||||
Handler: _API_ActivateNode_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ActivateCoordinator",
|
||||
Handler: _API_ActivateCoordinator_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "activation.proto",
|
||||
}
|
67
activation/cmd/main.go
Normal file
67
activation/cmd/main.go
Normal file
@ -0,0 +1,67 @@
|
||||
// The activation service verifies and admits nodes joining a Constellation
// cluster. main wires together the validator, KMS client, kubeadm client,
// measurement-file watcher, and the aTLS gRPC server.
package main

import (
	"flag"

	"github.com/edgelesssys/constellation/activation/kms"
	"github.com/edgelesssys/constellation/activation/kubeadm"
	"github.com/edgelesssys/constellation/activation/server"
	"github.com/edgelesssys/constellation/activation/validator"
	"github.com/edgelesssys/constellation/activation/watcher"
	"github.com/edgelesssys/constellation/coordinator/atls"
	"github.com/edgelesssys/constellation/internal/constants"
	"github.com/edgelesssys/constellation/internal/file"
	"github.com/spf13/afero"
	"k8s.io/klog/v2"
)

const (
	// bindPort is the TCP port the activation gRPC server listens on.
	bindPort = "9090"
)

func main() {
	provider := flag.String("cloud-provider", "", "cloud service provider this binary is running on")
	kmsEndpoint := flag.String("kms-endpoint", "", "endpoint of Constellations key management service")

	// klog verbosity is controlled via the -v flag (see README).
	klog.InitFlags(nil)
	flag.Parse()
	klog.V(2).Infof("\nConstellation Node Activation Service\nVersion: %s\nRunning on: %s", constants.VersionInfo, *provider)

	handler := file.NewHandler(afero.NewOsFs())

	// Validator checks attestation statements of joining nodes for the
	// configured cloud provider. Note: shadows the validator package.
	validator, err := validator.New(*provider, handler)
	if err != nil {
		flag.Usage()
		klog.Exitf("failed to create validator: %s", err)
	}

	// Server-side aTLS config: clients are verified via attestation.
	tlsConfig, err := atls.CreateAttestationServerTLSConfig(nil, []atls.Validator{validator})
	if err != nil {
		klog.Exitf("unable to create server config: %s", err)
	}

	// kubeadm issues Kubernetes join tokens; kms fetches disk encryption keys.
	kubeadm, err := kubeadm.New()
	if err != nil {
		klog.Exitf("failed to create kubeadm: %s", err)
	}
	kms := kms.New(*kmsEndpoint)

	server := server.New(handler, kubeadm, kms)

	// Watcher updates the validator when the expected-measurements file changes.
	watcher, err := watcher.New(validator)
	if err != nil {
		klog.Exitf("failed to create watcher for measurements updates: %s", err)
	}
	defer watcher.Close()

	go func() {
		klog.V(4).Infof("starting file watcher for measurements file %s", constants.ActivationMeasurementsFilename)
		if err := watcher.Watch(constants.ActivationMeasurementsFilename); err != nil {
			// Exits the whole process; measurement updates are essential.
			klog.Exitf("failed to watch measurements file: %s", err)
		}
	}()

	// Blocks until the server stops; any serve error is fatal.
	if err := server.Run(tlsConfig, bindPort); err != nil {
		klog.Exitf("failed to run server: %s", err)
	}
}
|
59
activation/kms/kms.go
Normal file
59
activation/kms/kms.go
Normal file
@ -0,0 +1,59 @@
|
||||
package kms
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
// Client interacts with Constellation's key management service.
// The zero value is not usable; construct it with New.
type Client struct {
	endpoint string     // address of the KMS gRPC endpoint, dialed on each request
	grpc     grpcClient // pluggable transport layer; replaced by a stub in tests
}

// New creates a new KMS Client for the given endpoint.
func New(endpoint string) Client {
	return Client{
		endpoint: endpoint,
		grpc:     client{},
	}
}
|
||||
|
||||
// GetDEK returns a data encryption key for the given UUID.
|
||||
func (c Client) GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error) {
|
||||
// TODO: update credentials if we enable aTLS on the KMS
|
||||
// For now this is fine since traffic is only routed through the Constellation cluster
|
||||
conn, err := grpc.DialContext(ctx, c.endpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
res, err := c.grpc.GetDataKey(
|
||||
ctx,
|
||||
&kmsproto.GetDataKeyRequest{
|
||||
DataKeyId: uuid,
|
||||
Length: uint32(length),
|
||||
},
|
||||
conn,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("fetching data encryption key from Constellation KMS: %w", err)
|
||||
}
|
||||
|
||||
return res.DataKey, nil
|
||||
}
|
||||
|
||||
type grpcClient interface {
|
||||
GetDataKey(context.Context, *kmsproto.GetDataKeyRequest, *grpc.ClientConn) (*kmsproto.GetDataKeyResponse, error)
|
||||
}
|
||||
|
||||
type client struct{}
|
||||
|
||||
func (c client) GetDataKey(ctx context.Context, req *kmsproto.GetDataKeyRequest, conn *grpc.ClientConn) (*kmsproto.GetDataKeyResponse, error) {
|
||||
return kmsproto.NewAPIClient(conn).GetDataKey(ctx, req)
|
||||
}
|
57
activation/kms/kms_test.go
Normal file
57
activation/kms/kms_test.go
Normal file
@ -0,0 +1,57 @@
|
||||
package kms
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/kms/server/kmsapi/kmsproto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/test/bufconn"
|
||||
)
|
||||
|
||||
// stubClient is a test double for grpcClient; it returns the configured
// data key and error without performing any network I/O.
type stubClient struct {
	getDataKeyErr error
	dataKey       []byte
}

func (c *stubClient) GetDataKey(context.Context, *kmsproto.GetDataKeyRequest, *grpc.ClientConn) (*kmsproto.GetDataKeyResponse, error) {
	return &kmsproto.GetDataKeyResponse{DataKey: c.dataKey}, c.getDataKeyErr
}

// TestGetDataKey checks that Client.GetDataKey returns the key from the
// underlying gRPC client and propagates its errors.
func TestGetDataKey(t *testing.T) {
	testCases := map[string]struct {
		client  *stubClient
		wantErr bool
	}{
		"GetDataKey success": {
			client: &stubClient{dataKey: []byte{0x1, 0x2, 0x3}},
		},
		"GetDataKey error": {
			client:  &stubClient{getDataKeyErr: errors.New("error")},
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)

			// bufconn provides an in-memory address so New gets a valid
			// endpoint without opening a real socket.
			listener := bufconn.Listen(1)
			defer listener.Close()

			client := New(listener.Addr().String())
			// swap in the stub so no real gRPC call is made
			client.grpc = tc.client

			res, err := client.GetDataKey(context.Background(), "disk-uuid", 32)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.Equal(tc.client.dataKey, res)
			}
		})
	}
}
|
100
activation/kubeadm/kubeadm.go
Normal file
100
activation/kubeadm/kubeadm.go
Normal file
@ -0,0 +1,100 @@
|
||||
package kubeadm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
bootstraputil "k8s.io/cluster-bootstrap/token/util"
|
||||
"k8s.io/klog/v2"
|
||||
bootstraptoken "k8s.io/kubernetes/cmd/kubeadm/app/apis/bootstraptoken/v1"
|
||||
kubeadm "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/bootstraptoken/node"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin"
|
||||
)
|
||||
|
||||
// Kubeadm manages joining of new nodes.
|
||||
type Kubeadm struct {
|
||||
client clientset.Interface
|
||||
file file.Handler
|
||||
}
|
||||
|
||||
// New creates a new Kubeadm instance.
|
||||
func New() (*Kubeadm, error) {
|
||||
config, err := rest.InClusterConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get in-cluster config: %w", err)
|
||||
}
|
||||
client, err := clientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create client: %w", err)
|
||||
}
|
||||
file := file.NewHandler(afero.NewOsFs())
|
||||
|
||||
return &Kubeadm{
|
||||
client: client,
|
||||
file: file,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetJoinToken creates a new bootstrap (join) token, which a node can use to join the cluster.
// The returned discovery info carries the token, the API server endpoint,
// and the pinned public-key hashes of the cluster's CA certificates.
func (k *Kubeadm) GetJoinToken(ttl time.Duration) (*kubeadm.BootstrapTokenDiscovery, error) {
	klog.V(6).Info("[kubeadm] Generating new random bootstrap token")
	rawToken, err := bootstraputil.GenerateBootstrapToken()
	if err != nil {
		return nil, fmt.Errorf("couldn't generate random token: %w", err)
	}
	tokenStr, err := bootstraptoken.NewBootstrapTokenString(rawToken)
	if err != nil {
		return nil, fmt.Errorf("invalid token: %w", err)
	}
	// the token expires after ttl so unused tokens don't linger in the cluster
	token := bootstraptoken.BootstrapToken{
		Token:       tokenStr,
		Description: "Bootstrap token generated by Constellation's Activation service",
		TTL:         &metav1.Duration{Duration: ttl},
	}

	// create the token in Kubernetes
	klog.V(6).Info("[kubeadm] Creating bootstrap token in Kubernetes")
	if err := tokenphase.CreateNewTokens(k.client, []bootstraptoken.BootstrapToken{token}); err != nil {
		return nil, fmt.Errorf("creating bootstrap token: %w", err)
	}

	// parse Kubernetes CA certs
	klog.V(6).Info("[kubeadm] Preparing join token for new node")
	rawConfig, err := k.file.Read(constants.CoreOSAdminConfFilename)
	if err != nil {
		return nil, fmt.Errorf("loading kubeconfig file: %w", err)
	}
	config, err := clientcmd.Load(rawConfig)
	if err != nil {
		return nil, fmt.Errorf("loading kubeconfig file: %w", err)
	}
	clusterConfig := kubeconfig.GetClusterFromKubeConfig(config)
	if clusterConfig == nil {
		return nil, errors.New("couldn't get cluster config from kubeconfig file")
	}
	caCerts, err := certutil.ParseCertsPEM(clusterConfig.CertificateAuthorityData)
	if err != nil {
		return nil, fmt.Errorf("parsing CA certs: %w", err)
	}
	// pin each CA cert's public key so joining nodes can verify the API server
	publicKeyPins := make([]string, 0, len(caCerts))
	for _, caCert := range caCerts {
		publicKeyPins = append(publicKeyPins, pubkeypin.Hash(caCert))
	}

	return &kubeadm.BootstrapTokenDiscovery{
		Token:             tokenStr.String(),
		APIServerEndpoint: "10.118.0.1:6443", // This is not HA and should be replaced with the IP of the node issuing the token
		CACertHashes:      publicKeyPins,
	}, nil
}
|
90
activation/kubeadm/kubeadm_test.go
Normal file
90
activation/kubeadm/kubeadm_test.go
Normal file
@ -0,0 +1,90 @@
|
||||
package kubeadm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
// TestGetJoinToken checks token creation against a fake clientset and an
// in-memory kubeconfig, covering valid, incomplete, and missing configs.
// NOTE(review): YAML indentation inside the raw strings was flattened by
// text extraction and has been reconstructed to standard kubeconfig layout.
func TestGetJoinToken(t *testing.T) {
	validConf := `apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1EVXpNREE0TWpJd01Gb1hEVE15TURVeU56QTRNakl3TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTmV5CnNubVJQbDYxaXZGWWRIUjFJUjdyRS9PNjNSOVhpVERwM1V4T2tMQzdMaW94bFA0SmRINzdHMUJ4Y2NCSjVISDIKZHBUTklzcjNxMEZ3ckdtK1JVYzdoRjBmZjgwdUtyUVVMN3UrYWlIRU5HSExVSFVnc3V4Tmd1bUxRdnlrRTUzNQp4dWRVSWpVV0g5M3NuRU5GempuWkRZM09SWVdNQ253OVlxMk5CZDdBRktKY1o3WDc3U1I3eStNK3czdGkvQlZpCmNtR1BvRW1WTTV3V0VReFQwYlpxNjcxTXltcmhEenFwbEZ2dkpranFIdVp6dUFhZ0pXWW9nejNsYjZLbCtmdmgKTjBjbFBDMjJyUUJJY01JWDVHdG40bzJ5U2JvQnBoRWNEWkx6TjIyU0tZZ2ViSGQwOU9lcktWdGw5bDl6cmQvVApBWm5jOTNQVCtvWTFsSmdldUE4Q0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZOVmNPNUZZY2NUTVN1SHpJWFZMYlppUnZRVVZNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBTDBsRERnbEsvY1JCNHVoQXJBRwpRSDhOeGtCSnhGNXYrVWMyZVFGa3dRTlB5SkU3QTRMV2p1eEVLN25meWVrTk91c2N2Wm1DQzVJNFhVZHAzb0ptCnZzSVlsN2YvMEFaMUt3d1RvQSt3cFF2QVB1NHlhM251MkZkMC9DVkViazNUZTV1MzRmQkxvL0YzK0Q2dFZLb2gKbVpGYmdoVjdMZms5SlQ4UzZjbGxyYjZkT3dCdGViUDBMQWZJd0hWaDBZNEsyY0thc3ZtU2xtMktpRXdURlBrbgpTSkNWWnI1aUJ3eGFadk1mYlpEaDk1bGZCbEtCVkdMNm5CcWs2TEpKM0VVd0tocTFGZEoyT0lSTkF0em14Z0R3CnNkOWd0SE4rK0pUcnhDa0ZBUTdwVWptdXBjZmpDOWhRRk1HOTRzTzk5elhZd2svTEdhV3FlS0pBYlRiNVdoRWcKYU5ZPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://127.0.0.1:16443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes`

	missingCA := `apiVersion: v1
kind: Config
clusters:
- cluster:
    server: https://127.0.0.1:16443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes`

	testCases := map[string]struct {
		adminConf string
		wantErr   bool
	}{
		"success": {
			adminConf: validConf,
		},
		"no certificate-authority-data": {
			adminConf: missingCA,
			wantErr:   true,
		},
		"no cluster config": {
			adminConf: `apiVersion: v1
kind: Config`,
			wantErr: true,
		},
		"invalid config": {
			adminConf: "not a config",
			wantErr:   true,
		},
		"config does not exist": {
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			client := &Kubeadm{
				file:   file.NewHandler(afero.NewMemMapFs()),
				client: fake.NewSimpleClientset(),
			}
			// write the admin kubeconfig only when the case provides one
			if tc.adminConf != "" {
				require.NoError(client.file.Write(constants.CoreOSAdminConfFilename, []byte(tc.adminConf), file.OptNone))
			}

			res, err := client.GetJoinToken(time.Minute)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
				assert.NotNil(res)
			}
		})
	}
}
|
128
activation/server/server.go
Normal file
128
activation/server/server.go
Normal file
@ -0,0 +1,128 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
proto "github.com/edgelesssys/constellation/activation/activationproto"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
)
|
||||
|
||||
// Server implements the core logic of Constellation's node activation service.
|
||||
type Server struct {
|
||||
file file.Handler
|
||||
joinTokenGetter joinTokenGetter
|
||||
dataKeyGetter dataKeyGetter
|
||||
proto.UnimplementedAPIServer
|
||||
}
|
||||
|
||||
// New initializes a new Server.
|
||||
func New(fileHandler file.Handler, joinTokenGetter joinTokenGetter, dataKeyGetter dataKeyGetter) *Server {
|
||||
return &Server{
|
||||
file: fileHandler,
|
||||
joinTokenGetter: joinTokenGetter,
|
||||
dataKeyGetter: dataKeyGetter,
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts the gRPC server on the given port, using the provided tlsConfig.
|
||||
func (s *Server) Run(tlsConfig *tls.Config, port string) error {
|
||||
grpcServer := grpc.NewServer(
|
||||
grpc.Creds(credentials.NewTLS(tlsConfig)),
|
||||
grpc.UnaryInterceptor(logGRPC),
|
||||
)
|
||||
|
||||
proto.RegisterAPIServer(grpcServer, s)
|
||||
|
||||
lis, err := net.Listen("tcp", net.JoinHostPort("", port))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to listen: %s", err)
|
||||
}
|
||||
klog.V(2).Infof("starting activation service on %s", lis.Addr().String())
|
||||
return grpcServer.Serve(lis)
|
||||
}
|
||||
|
||||
// ActivateNode handles activation requests of Constellation worker nodes.
|
||||
// A worker node will receive:
|
||||
// - stateful disk encryption key.
|
||||
// - Kubernetes join token.
|
||||
// - cluster and owner ID to taint the node as initialized.
|
||||
func (s *Server) ActivateNode(ctx context.Context, req *proto.ActivateNodeRequest) (*proto.ActivateNodeResponse, error) {
|
||||
klog.V(4).Infof("ActivateNode: loading IDs")
|
||||
var id id
|
||||
if err := s.file.ReadJSON(constants.ActivationIDFilename, &id); err != nil {
|
||||
klog.Errorf("unable to load IDs: %s", err)
|
||||
return nil, status.Errorf(codes.Internal, "unable to load IDs: %s", err)
|
||||
}
|
||||
|
||||
klog.V(4).Infof("ActivateNode: requesting disk encryption key")
|
||||
stateDiskKey, err := s.dataKeyGetter.GetDataKey(ctx, req.DiskUuid, constants.StateDiskKeyLength)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to get key for stateful disk: %s", err)
|
||||
return nil, status.Errorf(codes.Internal, "unable to get key for stateful disk: %s", err)
|
||||
}
|
||||
|
||||
klog.V(4).Infof("ActivateNode: creating Kubernetes join token")
|
||||
kubeArgs, err := s.joinTokenGetter.GetJoinToken(constants.KubernetesJoinTokenTTL)
|
||||
if err != nil {
|
||||
klog.Errorf("unable to generate Kubernetes join arguments: %s", err)
|
||||
return nil, status.Errorf(codes.Internal, "unable to generate Kubernetes join arguments: %s", err)
|
||||
}
|
||||
|
||||
klog.V(4).Info("ActivateNode successful")
|
||||
|
||||
return &proto.ActivateNodeResponse{
|
||||
StateDiskKey: stateDiskKey,
|
||||
ClusterId: id.Cluster,
|
||||
OwnerId: id.Owner,
|
||||
ApiServerEndpoint: kubeArgs.APIServerEndpoint,
|
||||
Token: kubeArgs.Token,
|
||||
DiscoveryTokenCaCertHash: kubeArgs.CACertHashes[0],
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ActivateCoordinator handles activation requests of Constellation control-plane nodes.
|
||||
func (s *Server) ActivateCoordinator(ctx context.Context, req *proto.ActivateCoordinatorRequest) (*proto.ActivateCoordinatorResponse, error) {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// joinTokenGetter returns Kubernetes bootstrap (join) tokens.
// Implemented by the kubeadm package; defined here so tests can stub it.
type joinTokenGetter interface {
	// GetJoinToken returns a bootstrap (join) token valid for ttl.
	GetJoinToken(ttl time.Duration) (*kubeadmv1.BootstrapTokenDiscovery, error)
}

// dataKeyGetter interacts with Constellation's key management system to retrieve keys.
type dataKeyGetter interface {
	// GetDataKey returns a key of the given length (bytes) derived from Constellation's KMS.
	GetDataKey(ctx context.Context, uuid string, length int) ([]byte, error)
}

// id mirrors the JSON layout of the activation ID file
// (constants.ActivationIDFilename).
type id struct {
	Cluster []byte `json:"cluster"`
	Owner   []byte `json:"owner"`
}
|
||||
|
||||
// logGRPC writes a log with the name of every gRPC call or error it receives.
|
||||
func logGRPC(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
|
||||
// log the requests method name
|
||||
klog.V(2).Infof("GRPC call: %s", info.FullMethod)
|
||||
|
||||
// log errors, if any
|
||||
resp, err := handler(ctx, req)
|
||||
if err != nil {
|
||||
klog.Errorf("GRPC error: %v", err)
|
||||
}
|
||||
return resp, err
|
||||
}
|
123
activation/server/server_test.go
Normal file
123
activation/server/server_test.go
Normal file
@ -0,0 +1,123 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
proto "github.com/edgelesssys/constellation/activation/activationproto"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
kubeadmv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
|
||||
)
|
||||
|
||||
// TestActivateNode exercises the happy path and the individual failure
// modes of ActivateNode: KMS failure, corrupt or missing ID file, and
// join-token generation failure.
func TestActivateNode(t *testing.T) {
	someErr := errors.New("error")
	testKey := []byte{0x1, 0x2, 0x3}
	testID := id{
		Owner:   []byte{0x4, 0x5, 0x6},
		Cluster: []byte{0x7, 0x8, 0x9},
	}
	testJoinToken := &kubeadmv1.BootstrapTokenDiscovery{
		APIServerEndpoint: "192.0.2.1",
		CACertHashes:      []string{"hash"},
		Token:             "token",
	}

	testCases := map[string]struct {
		kubeadm stubTokenGetter
		kms     stubKeyGetter
		id      []byte
		wantErr bool
	}{
		"success": {
			kubeadm: stubTokenGetter{token: testJoinToken},
			kms:     stubKeyGetter{dataKey: testKey},
			id:      mustMarshalID(testID),
		},
		"GetDataKey fails": {
			kubeadm: stubTokenGetter{token: testJoinToken},
			kms:     stubKeyGetter{getDataKeyErr: someErr},
			id:      mustMarshalID(testID),
			wantErr: true,
		},
		"loading IDs fails": {
			kubeadm: stubTokenGetter{token: testJoinToken},
			kms:     stubKeyGetter{dataKey: testKey},
			id:      []byte{0x1, 0x2, 0x3}, // not valid JSON
			wantErr: true,
		},
		"no ID file": {
			kubeadm: stubTokenGetter{token: testJoinToken},
			kms:     stubKeyGetter{dataKey: testKey},
			wantErr: true,
		},
		"GetJoinToken fails": {
			kubeadm: stubTokenGetter{getJoinTokenErr: someErr},
			kms:     stubKeyGetter{dataKey: testKey},
			id:      mustMarshalID(testID),
			wantErr: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			file := file.NewHandler(afero.NewMemMapFs())
			// write the ID file only when the case provides one
			if len(tc.id) > 0 {
				require.NoError(file.Write(constants.ActivationIDFilename, tc.id, 0o644))
			}
			api := New(file, tc.kubeadm, tc.kms)

			resp, err := api.ActivateNode(context.Background(), &proto.ActivateNodeRequest{DiskUuid: "uuid"})
			if tc.wantErr {
				assert.Error(err)
				return
			}

			var expectedIDs id
			require.NoError(json.Unmarshal(tc.id, &expectedIDs))

			require.NoError(err)
			assert.Equal(tc.kms.dataKey, resp.StateDiskKey)
			assert.Equal(expectedIDs.Cluster, resp.ClusterId)
			assert.Equal(expectedIDs.Owner, resp.OwnerId)
			assert.Equal(tc.kubeadm.token.APIServerEndpoint, resp.ApiServerEndpoint)
			assert.Equal(tc.kubeadm.token.CACertHashes[0], resp.DiscoveryTokenCaCertHash)
			assert.Equal(tc.kubeadm.token.Token, resp.Token)
		})
	}
}
|
||||
|
||||
func mustMarshalID(id id) []byte {
|
||||
b, err := json.Marshal(id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
type stubTokenGetter struct {
|
||||
token *kubeadmv1.BootstrapTokenDiscovery
|
||||
getJoinTokenErr error
|
||||
}
|
||||
|
||||
func (f stubTokenGetter) GetJoinToken(time.Duration) (*kubeadmv1.BootstrapTokenDiscovery, error) {
|
||||
return f.token, f.getJoinTokenErr
|
||||
}
|
||||
|
||||
type stubKeyGetter struct {
|
||||
dataKey []byte
|
||||
getDataKeyErr error
|
||||
}
|
||||
|
||||
func (f stubKeyGetter) GetDataKey(context.Context, string, int) ([]byte, error) {
|
||||
return f.dataKey, f.getDataKeyErr
|
||||
}
|
81
activation/validator/validator.go
Normal file
81
activation/validator/validator.go
Normal file
@ -0,0 +1,81 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"encoding/asn1"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/atls"
|
||||
"github.com/edgelesssys/constellation/coordinator/attestation/azure"
|
||||
"github.com/edgelesssys/constellation/coordinator/attestation/gcp"
|
||||
"github.com/edgelesssys/constellation/coordinator/attestation/qemu"
|
||||
"github.com/edgelesssys/constellation/internal/cloud/cloudprovider"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// Updatable implements an updatable atls.Validator.
|
||||
type Updatable struct {
|
||||
mux sync.Mutex
|
||||
newValidator newValidatorFunc
|
||||
fileHandler file.Handler
|
||||
atls.Validator
|
||||
}
|
||||
|
||||
// New initializes a new updatable validator.
|
||||
func New(csp string, fileHandler file.Handler) (*Updatable, error) {
|
||||
var newValidator newValidatorFunc
|
||||
switch cloudprovider.FromString(csp) {
|
||||
case cloudprovider.Azure:
|
||||
newValidator = func(m map[uint32][]byte) atls.Validator { return azure.NewValidator(m) }
|
||||
case cloudprovider.GCP:
|
||||
newValidator = func(m map[uint32][]byte) atls.Validator { return gcp.NewValidator(m) }
|
||||
case cloudprovider.QEMU:
|
||||
newValidator = func(m map[uint32][]byte) atls.Validator { return qemu.NewValidator(m) }
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown cloud service provider: %q", csp)
|
||||
}
|
||||
|
||||
u := &Updatable{
|
||||
newValidator: newValidator,
|
||||
fileHandler: fileHandler,
|
||||
}
|
||||
|
||||
if err := u.Update(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// Validate calls the validators Validate method, and prevents any updates during the call.
|
||||
func (u *Updatable) Validate(attDoc []byte, nonce []byte) ([]byte, error) {
|
||||
u.mux.Lock()
|
||||
defer u.mux.Unlock()
|
||||
return u.Validator.Validate(attDoc, nonce)
|
||||
}
|
||||
|
||||
// OID returns the validators Object Identifier.
|
||||
func (u *Updatable) OID() asn1.ObjectIdentifier {
|
||||
return u.Validator.OID()
|
||||
}
|
||||
|
||||
// Update switches out the underlying validator by re-reading the expected
// measurements from disk. Safe for concurrent use; it holds the mutex for
// the duration of the file read and swap.
func (u *Updatable) Update() error {
	u.mux.Lock()
	defer u.mux.Unlock()

	klog.V(4).Info("Updating expected measurements")

	var measurements map[uint32][]byte
	if err := u.fileHandler.ReadJSON(constants.ActivationMeasurementsFilename, &measurements); err != nil {
		return err
	}
	klog.V(6).Infof("New measurements: %v", measurements)

	// replace (not mutate) the validator so readers never see partial state
	u.Validator = u.newValidator(measurements)

	return nil
}

// newValidatorFunc constructs a fresh validator from a measurement map.
type newValidatorFunc func(measurements map[uint32][]byte) atls.Validator
|
210
activation/validator/validator_test.go
Normal file
210
activation/validator/validator_test.go
Normal file
@ -0,0 +1,210 @@
|
||||
package validator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/asn1"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/edgelesssys/constellation/coordinator/atls"
|
||||
"github.com/edgelesssys/constellation/internal/constants"
|
||||
"github.com/edgelesssys/constellation/internal/file"
|
||||
"github.com/spf13/afero"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestNewUpdateableValidator checks constructor behavior per provider and
// that a missing measurements file or unknown provider yields an error.
func TestNewUpdateableValidator(t *testing.T) {
	testCases := map[string]struct {
		provider  string
		writeFile bool
		wantErr   bool
	}{
		"azure": {
			provider:  "azure",
			writeFile: true,
		},
		"gcp": {
			provider:  "gcp",
			writeFile: true,
		},
		"qemu": {
			provider:  "qemu",
			writeFile: true,
		},
		"no file": {
			provider:  "azure",
			writeFile: false,
			wantErr:   true,
		},
		"invalid provider": {
			provider:  "invalid",
			writeFile: true,
			wantErr:   true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			assert := assert.New(t)
			require := require.New(t)

			handler := file.NewHandler(afero.NewMemMapFs())
			// seed the measurements file when the case expects one to exist
			if tc.writeFile {
				require.NoError(handler.WriteJSON(
					constants.ActivationMeasurementsFilename,
					map[uint32][]byte{
						11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
					},
					file.OptNone,
				))
			}

			_, err := New(tc.provider, handler)
			if tc.wantErr {
				assert.Error(err)
			} else {
				assert.NoError(err)
			}
		})
	}
}
|
||||
|
||||
// TestUpdate verifies end-to-end that swapping measurements via Update
// changes which clients an aTLS server accepts.
func TestUpdate(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	oid := fakeOID{1, 3, 9900, 1}
	// newValidator captures oid by reference, so mutating oid below changes
	// what the next Update installs
	newValidator := func(m map[uint32][]byte) atls.Validator {
		return fakeValidator{fakeOID: oid}
	}
	handler := file.NewHandler(afero.NewMemMapFs())

	// create server
	validator := &Updatable{newValidator: newValidator, fileHandler: handler}

	// Update should fail if the file does not exist
	assert.Error(validator.Update())

	// write measurement config
	require.NoError(handler.WriteJSON(
		constants.ActivationMeasurementsFilename,
		map[uint32][]byte{
			11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
		},
		file.OptNone,
	))

	// call update once to initialize the server's validator
	require.NoError(validator.Update())

	// create tls config and start the server
	serverConfig, err := atls.CreateAttestationServerTLSConfig(nil, []atls.Validator{validator})
	require.NoError(err)
	server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = io.WriteString(w, "hello")
	}))
	server.TLS = serverConfig
	server.StartTLS()
	defer server.Close()

	// test connection to server
	clientOID := fakeOID{1, 3, 9900, 1}
	resp, err := testConnection(require, server.URL, clientOID)
	require.NoError(err)
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	require.NoError(err)
	assert.EqualValues("hello", body)

	// update the server's validator
	oid = fakeOID{1, 3, 9900, 2}
	require.NoError(validator.Update())

	// client connection should fail now, since the server's validator expects a different OID from the client
	_, err = testConnection(require, server.URL, clientOID)
	assert.Error(err)
}
|
||||
|
||||
// TestUpdateConcurrency runs Update from many goroutines; run with -race to
// confirm the mutex prevents data races on the validator swap.
func TestUpdateConcurrency(t *testing.T) {
	assert := assert.New(t)
	require := require.New(t)

	handler := file.NewHandler(afero.NewMemMapFs())
	validator := &Updatable{
		fileHandler: handler,
		newValidator: func(m map[uint32][]byte) atls.Validator {
			return fakeValidator{fakeOID: fakeOID{1, 3, 9900, 1}}
		},
	}
	require.NoError(handler.WriteJSON(
		constants.ActivationMeasurementsFilename,
		map[uint32][]byte{
			11: {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
		},
		file.OptNone,
	))

	var wg sync.WaitGroup

	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			assert.NoError(validator.Update())
		}()
	}

	wg.Wait()
}
|
||||
|
||||
func testConnection(require *require.Assertions, url string, oid fakeOID) (*http.Response, error) {
|
||||
clientConfig, err := atls.CreateAttestationClientTLSConfig(fakeIssuer{fakeOID: oid}, nil)
|
||||
require.NoError(err)
|
||||
client := http.Client{Transport: &http.Transport{TLSClientConfig: clientConfig}}
|
||||
|
||||
req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, url, http.NoBody)
|
||||
require.NoError(err)
|
||||
return client.Do(req)
|
||||
}
|
||||
|
||||
// fakeIssuer produces fake attestation documents for testing.
type fakeIssuer struct {
	fakeOID
}

// Issue wraps the user data and nonce into a JSON fake attestation document.
func (fakeIssuer) Issue(userData []byte, nonce []byte) ([]byte, error) {
	doc := fakeDoc{UserData: userData, Nonce: nonce}
	return json.Marshal(doc)
}

// fakeValidator accepts documents produced by fakeIssuer.
type fakeValidator struct {
	fakeOID
	err error
}

// Validate unmarshals the fake document, checks the nonce, and returns the
// embedded user data together with the configured error (if any).
func (v fakeValidator) Validate(attDoc []byte, nonce []byte) ([]byte, error) {
	var doc fakeDoc
	if err := json.Unmarshal(attDoc, &doc); err != nil {
		return nil, err
	}
	if !bytes.Equal(doc.Nonce, nonce) {
		return nil, errors.New("invalid nonce")
	}
	return doc.UserData, v.err
}

// fakeOID lets the fakes report an arbitrary object identifier.
type fakeOID asn1.ObjectIdentifier

// OID returns the identifier itself.
func (o fakeOID) OID() asn1.ObjectIdentifier {
	return asn1.ObjectIdentifier(o)
}

// fakeDoc is the payload exchanged between fakeIssuer and fakeValidator.
type fakeDoc struct {
	UserData []byte
	Nonce    []byte
}
|
108
activation/watcher/watcher.go
Normal file
108
activation/watcher/watcher.go
Normal file
@ -0,0 +1,108 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// FileWatcher watches for changes to the file and calls the waiter's Update method.
|
||||
type FileWatcher struct {
|
||||
updater updater
|
||||
watcher eventWatcher
|
||||
done chan struct{}
|
||||
}
|
||||
|
||||
// New creates a new FileWatcher for the given validator.
|
||||
func New(updater updater) (*FileWatcher, error) {
|
||||
watcher, err := fsnotify.NewWatcher()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &FileWatcher{
|
||||
watcher: &fsnotifyWatcher{watcher},
|
||||
updater: updater,
|
||||
done: make(chan struct{}, 1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the watcher.
|
||||
// It should only be called once.
|
||||
func (f *FileWatcher) Close() error {
|
||||
err := f.watcher.Close()
|
||||
<-f.done
|
||||
return err
|
||||
}
|
||||
|
||||
// Watch starts watching the file at the given path.
|
||||
// It will call the watcher's Update method when the file is modified.
|
||||
func (f *FileWatcher) Watch(file string) error {
|
||||
defer func() { f.done <- struct{}{} }()
|
||||
if err := f.watcher.Add(file); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case event, ok := <-f.watcher.Events():
|
||||
if !ok {
|
||||
klog.V(4).Infof("watcher closed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// file changes may be indicated by either a WRITE, CHMOD, CREATE or RENAME event
|
||||
if event.Op&(fsnotify.Write|fsnotify.Chmod|fsnotify.Create|fsnotify.Rename) != 0 {
|
||||
if err := f.updater.Update(); err != nil {
|
||||
klog.Errorf("failed to update activation validator: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// if a file gets removed, e.g. by a rename event, we need to re-add the file to the watcher
|
||||
if event.Op&fsnotify.Remove == fsnotify.Remove {
|
||||
if err := f.watcher.Add(event.Name); err != nil {
|
||||
klog.Errorf("failed to re-add file %q to watcher: %s", event.Name, err)
|
||||
return fmt.Errorf("failed to re-add file %q to watcher: %w", event.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
case err := <-f.watcher.Errors():
|
||||
if err != nil {
|
||||
klog.Errorf("watching for measurements updates: %s", err)
|
||||
return fmt.Errorf("watching for measurements updates: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updater is notified whenever the watched file changes.
// Implemented by validator.Updatable.
type updater interface {
	Update() error
}

// eventWatcher abstracts *fsnotify.Watcher so tests can substitute a fake.
type eventWatcher interface {
	Add(string) error
	Close() error
	Events() <-chan fsnotify.Event
	Errors() <-chan error
}
|
||||
|
||||
// fsnotifyWatcher adapts *fsnotify.Watcher to the eventWatcher interface
// by exposing its Events and Errors fields through methods.
type fsnotifyWatcher struct {
	watcher *fsnotify.Watcher
}

// Add registers the file at the given path with the underlying watcher.
func (w *fsnotifyWatcher) Add(file string) error {
	return w.watcher.Add(file)
}

// Close closes the underlying watcher.
func (w *fsnotifyWatcher) Close() error {
	return w.watcher.Close()
}

// Events returns the underlying watcher's event channel.
func (w *fsnotifyWatcher) Events() <-chan fsnotify.Event {
	return w.watcher.Events
}

// Errors returns the underlying watcher's error channel.
func (w *fsnotifyWatcher) Errors() <-chan error {
	return w.watcher.Errors
}
|
170
activation/watcher/watcher_test.go
Normal file
170
activation/watcher/watcher_test.go
Normal file
@ -0,0 +1,170 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestWatcher(t *testing.T) {
|
||||
someErr := errors.New("error")
|
||||
|
||||
testCases := map[string]struct {
|
||||
updater *testUpdater
|
||||
watcher *testWatcher
|
||||
events []fsnotify.Event
|
||||
watchErr error
|
||||
wantAddCalls int
|
||||
wantErr bool
|
||||
}{
|
||||
"success": {
|
||||
updater: &testUpdater{},
|
||||
watcher: &testWatcher{
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
events: []fsnotify.Event{
|
||||
{Op: fsnotify.Write, Name: "test"},
|
||||
{Op: fsnotify.Chmod, Name: "test"},
|
||||
{Op: fsnotify.Create, Name: "test"},
|
||||
{Op: fsnotify.Rename, Name: "test"},
|
||||
},
|
||||
wantAddCalls: 1,
|
||||
},
|
||||
"failing update does not interrupt execution": {
|
||||
updater: &testUpdater{
|
||||
err: someErr,
|
||||
},
|
||||
watcher: &testWatcher{
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
events: []fsnotify.Event{
|
||||
{Op: fsnotify.Write, Name: "test"},
|
||||
{Op: fsnotify.Write, Name: "test"},
|
||||
},
|
||||
wantAddCalls: 1,
|
||||
},
|
||||
"removed file gets re-added": {
|
||||
updater: &testUpdater{},
|
||||
watcher: &testWatcher{
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
events: []fsnotify.Event{
|
||||
{Op: fsnotify.Write, Name: "test"},
|
||||
{Op: fsnotify.Remove, Name: "test"},
|
||||
},
|
||||
wantAddCalls: 2,
|
||||
},
|
||||
"re-adding file fails": {
|
||||
updater: &testUpdater{},
|
||||
watcher: &testWatcher{
|
||||
addErr: someErr,
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
events: []fsnotify.Event{{Op: fsnotify.Remove, Name: "test"}},
|
||||
wantAddCalls: 1,
|
||||
wantErr: true,
|
||||
},
|
||||
"add file fails": {
|
||||
updater: &testUpdater{},
|
||||
watcher: &testWatcher{
|
||||
addErr: someErr,
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
wantAddCalls: 1,
|
||||
wantErr: true,
|
||||
},
|
||||
"error during watch": {
|
||||
updater: &testUpdater{},
|
||||
watcher: &testWatcher{
|
||||
events: make(chan fsnotify.Event, 1),
|
||||
errors: make(chan error, 1),
|
||||
},
|
||||
wantAddCalls: 1,
|
||||
watchErr: someErr,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
assert := assert.New(t)
|
||||
|
||||
watcher := &FileWatcher{
|
||||
updater: tc.updater,
|
||||
watcher: tc.watcher,
|
||||
done: make(chan struct{}, 1),
|
||||
}
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
err := watcher.Watch("test")
|
||||
if tc.wantErr {
|
||||
assert.Error(err)
|
||||
return
|
||||
}
|
||||
assert.NoError(err)
|
||||
}()
|
||||
|
||||
time.Sleep(15 * time.Millisecond)
|
||||
|
||||
for _, event := range tc.events {
|
||||
tc.watcher.events <- event
|
||||
}
|
||||
|
||||
if tc.watchErr != nil {
|
||||
tc.watcher.errors <- tc.watchErr
|
||||
}
|
||||
|
||||
close(tc.watcher.events)
|
||||
assert.NoError(watcher.Close())
|
||||
wg.Wait()
|
||||
|
||||
// check that the watchers Add method was called the expected number of times
|
||||
assert.Equal(tc.wantAddCalls, tc.watcher.addCalled)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// testUpdater is a stub updater whose Update call returns a configurable error.
type testUpdater struct {
	// err is returned by every call to Update.
	err error
}

// Update implements the updater interface for tests.
func (u *testUpdater) Update() error {
	return u.err
}
|
||||
|
||||
// testWatcher is a stub eventWatcher with injectable errors and
// test-controlled event/error channels. It also counts Add calls so tests
// can verify re-add behavior.
type testWatcher struct {
	// addCalled counts how often Add was invoked.
	addCalled int
	// addErr is returned by every call to Add.
	addErr error
	// closeErr is returned by Close.
	closeErr error
	// events is the channel handed out by Events; tests send events into it.
	events chan fsnotify.Event
	// errors is the channel handed out by Errors; tests send errors into it.
	errors chan error
}

// Add records the call and returns the configured error.
func (w *testWatcher) Add(path string) error {
	w.addCalled++
	return w.addErr
}

// Close returns the configured close error.
func (w *testWatcher) Close() error {
	return w.closeErr
}

// Events returns the test-controlled event channel.
func (w *testWatcher) Events() <-chan fsnotify.Event {
	return w.events
}

// Errors returns the test-controlled error channel.
func (w *testWatcher) Errors() <-chan error {
	return w.errors
}
|
9
go.mod
9
go.mod
@ -20,6 +20,7 @@ replace (
|
||||
k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.24.0
|
||||
k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.24.0
|
||||
k8s.io/kube-proxy => k8s.io/kube-proxy v0.24.0
|
||||
k8s.io/kube-proxy v0.0.0 => k8s.io/kube-proxy v0.24.0
|
||||
k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.24.0
|
||||
k8s.io/kubectl => k8s.io/kubectl v0.24.0
|
||||
k8s.io/kubelet => k8s.io/kubelet v0.24.0
|
||||
@ -68,6 +69,7 @@ require (
|
||||
github.com/coreos/go-systemd/v22 v22.3.2
|
||||
github.com/docker/docker v20.10.13+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/fsnotify/fsnotify v1.5.4
|
||||
github.com/go-playground/locales v0.14.0
|
||||
github.com/go-playground/universal-translator v0.18.0
|
||||
github.com/go-playground/validator/v10 v10.11.0
|
||||
@ -103,8 +105,10 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99
|
||||
k8s.io/api v0.24.0
|
||||
k8s.io/apimachinery v0.24.0
|
||||
k8s.io/apiserver v0.24.0
|
||||
k8s.io/cli-runtime v0.24.0
|
||||
k8s.io/client-go v0.24.0
|
||||
k8s.io/cluster-bootstrap v0.0.0
|
||||
k8s.io/klog/v2 v2.60.1
|
||||
k8s.io/kubelet v0.0.0
|
||||
k8s.io/kubernetes v1.24.0
|
||||
@ -154,7 +158,6 @@ require (
|
||||
github.com/docker/go-units v0.4.0 // indirect
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-logr/logr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
@ -215,7 +218,7 @@ require (
|
||||
go.uber.org/atomic v1.9.0 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 // indirect
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
|
||||
golang.org/x/text v0.3.7
|
||||
golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect
|
||||
@ -224,8 +227,6 @@ require (
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/apiserver v0.24.0
|
||||
k8s.io/cluster-bootstrap v0.0.0 // indirect
|
||||
k8s.io/component-base v0.24.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
|
||||
|
4
go.sum
4
go.sum
@ -604,6 +604,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
|
||||
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
|
||||
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
|
||||
github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
|
||||
github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o=
|
||||
@ -1917,6 +1919,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5 h1:y/woIyUBFbpQGKS0u1aHF/40WUDnek3fPOyD08H5Vng=
|
||||
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
|
||||
|
@ -37,12 +37,17 @@ const (
|
||||
// Filenames.
|
||||
//
|
||||
|
||||
StateFilename = "constellation-state.json"
|
||||
ConfigFilename = "constellation-conf.yaml"
|
||||
DebugdConfigFilename = "cdbg-conf.yaml"
|
||||
AdminConfFilename = "constellation-admin.conf"
|
||||
MasterSecretFilename = "constellation-mastersecret.base64"
|
||||
WGQuickConfigFilename = "wg0.conf"
|
||||
StateFilename = "constellation-state.json"
|
||||
ConfigFilename = "constellation-conf.yaml"
|
||||
DebugdConfigFilename = "cdbg-conf.yaml"
|
||||
AdminConfFilename = "constellation-admin.conf"
|
||||
MasterSecretFilename = "constellation-mastersecret.base64"
|
||||
WGQuickConfigFilename = "wg0.conf"
|
||||
CoreOSAdminConfFilename = "/etc/kubernetes/admin.conf"
|
||||
|
||||
// Filenames for the Activation service.
|
||||
ActivationMeasurementsFilename = "/var/config/measurements"
|
||||
ActivationIDFilename = "/var/config/id"
|
||||
|
||||
//
|
||||
// Cryptographic constants.
|
||||
|
@ -49,6 +49,10 @@ WORKDIR /kms
|
||||
COPY kms/server/kmsapi/kmsproto/*.proto /kms
|
||||
RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto
|
||||
|
||||
## activation
|
||||
WORKDIR /activation
|
||||
COPY activation/activationproto/*.proto /activation
|
||||
RUN protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative *.proto
|
||||
|
||||
FROM scratch as export
|
||||
COPY --from=build /pubapi/*.go coordinator/pubapi/pubproto/
|
||||
@ -56,3 +60,4 @@ COPY --from=build /vpnapi/*.go coordinator/vpnapi/vpnproto/
|
||||
COPY --from=build /disk-mapper/*.go state/keyservice/keyproto/
|
||||
COPY --from=build /service/*.go debugd/service/
|
||||
COPY --from=build /kms/*.go kms/server/kmsapi/kmsproto/
|
||||
COPY --from=build /activation/*.go activation/activationproto/
|
||||
|
Loading…
Reference in New Issue
Block a user