Mirror of https://github.com/edgelesssys/constellation.git (synced 2025-01-11 23:49:30 -05:00)
Feat/revive (#212)
* enable revive as linter
* fix var-naming revive issues
* fix blank-imports revive issues
* fix receiver-naming revive issues
* fix exported revive issues
* fix indent-error-flow revive issues
* fix unexported-return revive issues
* fix indent-error-flow revive issues

Signed-off-by: Fabian Kammel <fk@edgeless.systems>
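For readers unfamiliar with these revive rules, here is a minimal sketch of the two patterns this commit fixes most often. It is illustrative only, condensed from the diff below rather than copied from the repository:

// var-naming: initialisms like "ID" are upper-cased.
func GetIdKeyDigest() {} // before: revive flags "Id"
func GetIDKeyDigest() {} // after

// unexported-return: an exported constructor must not return an unexported type.
type Cleaner struct{}                     // was: type cleaner struct{}
func New() *Cleaner { return &Cleaner{} } // was: func New() *cleaner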
This commit is contained in:
parent 2e93b354e4
commit 369480a50b
@@ -27,6 +27,7 @@ linters:
 - gofumpt
 - misspell
 - noctx
+- revive
 - tenv
 - unconvert
 - unparam
@@ -115,7 +115,7 @@ func main() {
 		log.With(zap.Error(err)).Fatalf("Failed to get selected PCRs")
 	}
 
-	if idkeydigest, err := snp.GetIdKeyDigest(vtpm.OpenVTPM); err == nil {
+	if idkeydigest, err := snp.GetIDKeyDigest(vtpm.OpenVTPM); err == nil {
 		issuer = initserver.NewIssuerWrapper(snp.NewIssuer(), vmtype.AzureCVM, idkeydigest)
 	} else {
 		// assume we are running in a trusted-launch VM
@@ -10,7 +10,7 @@ import (
 	"sync"
 )
 
-type cleaner struct {
+type Cleaner struct {
 	stoppers  []stopper
 	stopC     chan struct{}
 	startOnce sync.Once
@@ -18,8 +18,8 @@ type cleaner struct {
 }
 
 // New creates a new cleaner.
-func New(stoppers ...stopper) *cleaner {
-	res := &cleaner{
+func New(stoppers ...stopper) *Cleaner {
+	res := &Cleaner{
 		stoppers: stoppers,
 		stopC:    make(chan struct{}, 1),
 	}
@@ -28,13 +28,13 @@ func New(stoppers ...stopper) *cleaner {
 }
 
 // With adds a new stopper to the cleaner.
-func (c *cleaner) With(stopper stopper) *cleaner {
+func (c *Cleaner) With(stopper stopper) *Cleaner {
 	c.stoppers = append(c.stoppers, stopper)
 	return c
 }
 
 // Start blocks until it receives a stop message, stops all services gracefully and returns.
-func (c *cleaner) Start() {
+func (c *Cleaner) Start() {
 	c.startOnce.Do(func() {
 		defer c.wg.Done()
 		// wait for the stop message
@@ -51,7 +51,7 @@ func (c *cleaner) Start() {
 }
 
 // Clean initiates the cleanup but does not wait for it to complete.
-func (c *cleaner) Clean() {
+func (c *Cleaner) Clean() {
 	// try to enqueue the stop message once
 	// if the channel is full, the message is dropped
 	select {
@@ -61,7 +61,7 @@ func (c *cleaner) Clean() {
 }
 
 // Done waits for the cleanup to complete.
-func (c *cleaner) Done() {
+func (c *Cleaner) Done() {
 	c.wg.Wait()
 }
 
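Usage note: the exported Cleaner keeps the same fluent API as before. A minimal sketch, assuming two values stopperA and stopperB (hypothetical names) that satisfy the package's stopper interface:

c := cleaner.New(stopperA).With(stopperB) // register services to stop
go c.Start()                              // block until a stop message arrives, then stop all services
c.Clean()                                 // enqueue the stop message (non-blocking)
c.Done()                                  // wait for the cleanup to complete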
@@ -130,7 +130,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 		measurementSalt,
 		req.EnforcedPcrs,
 		req.EnforceIdkeydigest,
-		s.issuerWrapper.IdKeyDigest(),
+		s.issuerWrapper.IDKeyDigest(),
 		s.issuerWrapper.VMType() == vmtype.AzureCVM,
 		resources.KMSConfig{
 			MasterSecret: req.MasterSecret,
@@ -199,7 +199,7 @@ func (i *IssuerWrapper) VMType() vmtype.VMType {
 	return i.vmType
 }
 
-func (i *IssuerWrapper) IdKeyDigest() []byte {
+func (i *IssuerWrapper) IDKeyDigest() []byte {
 	return i.idkeydigest
 }
 
@@ -237,7 +237,7 @@ type ClusterInitializer interface {
 		k8sVersion string,
 		measurementSalt []byte,
 		enforcedPcrs []uint32,
-		enforceIdKeyDigest bool,
+		enforceIDKeyDigest bool,
 		idKeyDigest []byte,
 		azureCVM bool,
 		kmsConfig resources.KMSConfig,
@@ -18,8 +18,8 @@ import (
 
 const accessManagerNamespace = "kube-system"
 
-// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
-type accessManagerDeployment struct {
+// AccessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
+type AccessManagerDeployment struct {
 	ConfigMap      k8s.ConfigMap
 	ServiceAccount k8s.ServiceAccount
 	Role           rbac.Role
@@ -28,8 +28,8 @@ type accessManagerDeployment struct {
 }
 
 // NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
-func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeployment {
-	return &accessManagerDeployment{
+func NewAccessManagerDeployment(sshUsers map[string]string) *AccessManagerDeployment {
+	return &AccessManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: v1.TypeMeta{
 				APIVersion: "v1",
@@ -198,6 +198,6 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
 }
 
 // Marshal marshals the access-manager deployment as YAML documents.
-func (c *accessManagerDeployment) Marshal() ([]byte, error) {
+func (c *AccessManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -28,7 +28,7 @@ func TestAccessManagerMarshalUnmarshal(t *testing.T) {
 	data, err := accessManagerDeplNil.Marshal()
 	require.NoError(err)
 
-	var recreated accessManagerDeployment
+	var recreated AccessManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(accessManagerDeplNil, &recreated)
 
@@ -16,7 +16,7 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type cloudControllerManagerDeployment struct {
+type CloudControllerManagerDeployment struct {
 	ServiceAccount     k8s.ServiceAccount
 	ClusterRoleBinding rbac.ClusterRoleBinding
 	DaemonSet          apps.DaemonSet
@@ -27,7 +27,7 @@ type cloudControllerManagerDeployment struct {
 // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager
 
 // NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
-func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
+func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *CloudControllerManagerDeployment {
 	command := []string{
 		path,
 		fmt.Sprintf("--cloud-provider=%s", cloudProvider),
@@ -76,7 +76,7 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podC
 	}
 	volumeMounts = append(volumeMounts, extraVolumeMounts...)
 
-	return &cloudControllerManagerDeployment{
+	return &CloudControllerManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -174,6 +174,6 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podC
 	}
 }
 
-func (c *cloudControllerManagerDeployment) Marshal() ([]byte, error) {
+func (c *CloudControllerManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -23,7 +23,7 @@ func TestCloudControllerMarshalUnmarshal(t *testing.T) {
 	data, err := cloudControllerManagerDepl.Marshal()
 	require.NoError(err)
 
-	var recreated cloudControllerManagerDeployment
+	var recreated CloudControllerManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(cloudControllerManagerDepl, &recreated)
 }
@@ -15,7 +15,7 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type cloudNodeManagerDeployment struct {
+type CloudNodeManagerDeployment struct {
 	ServiceAccount     k8s.ServiceAccount
 	ClusterRole        rbac.ClusterRole
 	ClusterRoleBinding rbac.ClusterRoleBinding
@@ -23,13 +23,13 @@ type cloudNodeManagerDeployment struct {
 }
 
 // NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP.
-func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *cloudNodeManagerDeployment {
+func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *CloudNodeManagerDeployment {
 	command := []string{
 		path,
 		"--node-name=$(NODE_NAME)",
 	}
 	command = append(command, extraArgs...)
-	return &cloudNodeManagerDeployment{
+	return &CloudNodeManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -182,6 +182,6 @@ func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string
 	}
 }
 
 // Marshal marshals the cloud-node-manager deployment as YAML documents.
-func (c *cloudNodeManagerDeployment) Marshal() ([]byte, error) {
+func (c *CloudNodeManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) {
 	data, err := cloudNodeManagerDepl.Marshal()
 	require.NoError(err)
 
-	var recreated cloudNodeManagerDeployment
+	var recreated CloudNodeManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(cloudNodeManagerDepl, &recreated)
 }
@@ -18,7 +18,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type autoscalerDeployment struct {
+type AutoscalerDeployment struct {
 	PodDisruptionBudget policy.PodDisruptionBudget
 	ServiceAccount      k8s.ServiceAccount
 	ClusterRole         rbac.ClusterRole
@@ -30,8 +30,8 @@ type autoscalerDeployment struct {
 }
 
 // NewDefaultAutoscalerDeployment creates a new *autoscalerDeployment, customized for the CSP.
-func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar, k8sVersion versions.ValidK8sVersion) *autoscalerDeployment {
-	return &autoscalerDeployment{
+func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar, k8sVersion versions.ValidK8sVersion) *AutoscalerDeployment {
+	return &AutoscalerDeployment{
 		PodDisruptionBudget: policy.PodDisruptionBudget{
 			TypeMeta: v1.TypeMeta{
 				APIVersion: "policy/v1",
@@ -491,6 +491,6 @@ func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts
 	}
 }
 
-func (a *autoscalerDeployment) Marshal() ([]byte, error) {
+func (a *AutoscalerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(a)
 }
@@ -25,7 +25,7 @@ func TestAutoscalerDeploymentMarshalUnmarshal(t *testing.T) {
 
 	t.Log(string(data))
 
-	var recreated autoscalerDeployment
+	var recreated AutoscalerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(autoscalerDepl, &recreated)
 }
@@ -41,7 +41,7 @@ func TestAutoscalerDeploymentWithCommandMarshalUnmarshal(t *testing.T) {
 
 	t.Log(string(data))
 
-	var recreated autoscalerDeployment
+	var recreated AutoscalerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(autoscalerDepl, &recreated)
 }
@@ -14,14 +14,14 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type gcpGuestAgentDaemonset struct {
+type GCPGuestAgentDaemonset struct {
 	DaemonSet apps.DaemonSet
 }
 
 // NewGCPGuestAgentDaemonset creates a new GCP Guest Agent Daemonset.
 // It is used automatically to add loadbalancer IPs to the local routing table of GCP instances.
-func NewGCPGuestAgentDaemonset() *gcpGuestAgentDaemonset {
-	return &gcpGuestAgentDaemonset{
+func NewGCPGuestAgentDaemonset() *GCPGuestAgentDaemonset {
+	return &GCPGuestAgentDaemonset{
 		DaemonSet: apps.DaemonSet{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "apps/v1",
@@ -178,6 +178,6 @@ func NewGCPGuestAgentDaemonset() *gcpGuestAgentDaemonset {
 }
 
 // Marshal marshals the access-manager deployment as YAML documents.
-func (c *gcpGuestAgentDaemonset) Marshal() ([]byte, error) {
+func (c *GCPGuestAgentDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -20,7 +20,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type joinServiceDaemonset struct {
+type JoinServiceDaemonset struct {
 	ClusterRole        rbac.ClusterRole
 	ClusterRoleBinding rbac.ClusterRoleBinding
 	ConfigMap          k8s.ConfigMap
@@ -30,17 +30,17 @@ type joinServiceDaemonset struct {
 }
 
 // NewJoinServiceDaemonset returns a daemonset for the join service.
-func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIdKeyDigest, enforceIdKeyDigest string, measurementSalt []byte) *joinServiceDaemonset {
+func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIDKeyDigest, enforceIDKeyDigest string, measurementSalt []byte) *JoinServiceDaemonset {
 	joinConfigData := map[string]string{
 		constants.MeasurementsFilename: measurementsJSON,
 		constants.EnforcedPCRsFilename: enforcedPCRsJSON,
 	}
 	if cloudprovider.FromString(csp) == cloudprovider.Azure {
-		joinConfigData[constants.EnforceIdKeyDigestFilename] = enforceIdKeyDigest
-		joinConfigData[constants.IdKeyDigestFilename] = initialIdKeyDigest
+		joinConfigData[constants.EnforceIDKeyDigestFilename] = enforceIDKeyDigest
+		joinConfigData[constants.IDKeyDigestFilename] = initialIDKeyDigest
 	}
 
-	return &joinServiceDaemonset{
+	return &JoinServiceDaemonset{
 		ClusterRole: rbac.ClusterRole{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "rbac.authorization.k8s.io/v1",
@@ -272,6 +272,6 @@ func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIdK
 }
 
 // Marshal the daemonset using the Kubernetes resource marshaller.
-func (a *joinServiceDaemonset) Marshal() ([]byte, error) {
+func (a *JoinServiceDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(a)
 }
@@ -19,7 +19,7 @@ func TestNewJoinServiceDaemonset(t *testing.T) {
 	deploymentYAML, err := deployment.Marshal()
 	require.NoError(t, err)
 
-	var recreated joinServiceDaemonset
+	var recreated JoinServiceDaemonset
 	require.NoError(t, kubernetes.UnmarshalK8SResources(deploymentYAML, &recreated))
 	assert.Equal(t, deployment, &recreated)
 }
@@ -21,7 +21,7 @@ import (
 
 const kmsNamespace = "kube-system"
 
-type kmsDeployment struct {
+type KMSDeployment struct {
 	ServiceAccount k8s.ServiceAccount
 	Service        k8s.Service
 	ClusterRole    rbac.ClusterRole
@@ -41,8 +41,8 @@ type KMSConfig struct {
 }
 
 // NewKMSDeployment creates a new *kmsDeployment to use as the key management system inside Constellation.
-func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
-	return &kmsDeployment{
+func NewKMSDeployment(csp string, config KMSConfig) *KMSDeployment {
+	return &KMSDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -254,6 +254,6 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
 	}
 }
 
-func (c *kmsDeployment) Marshal() ([]byte, error) {
+func (c *KMSDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestKMSMarshalUnmarshal(t *testing.T) {
 	data, err := kmsDepl.Marshal()
 	require.NoError(err)
 
-	var recreated kmsDeployment
+	var recreated KMSDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(kmsDepl, &recreated)
 }
@@ -28,22 +28,22 @@ const (
 	KonnectivityKeyFilename = "/etc/kubernetes/konnectivity.key"
 )
 
-type konnectivityAgents struct {
+type KonnectivityAgents struct {
 	DaemonSet          appsv1.DaemonSet
 	ClusterRoleBinding rbacv1.ClusterRoleBinding
 	ServiceAccount     corev1.ServiceAccount
 }
 
-type konnectivityServerStaticPod struct {
+type KonnectivityServerStaticPod struct {
 	StaticPod corev1.Pod
 }
 
-type egressSelectorConfiguration struct {
+type EgressSelectorConfiguration struct {
 	EgressSelectorConfiguration apiserver.EgressSelectorConfiguration
 }
 
-func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents {
-	return &konnectivityAgents{
+func NewKonnectivityAgents(konnectivityServerAddress string) *KonnectivityAgents {
+	return &KonnectivityAgents{
 		DaemonSet: appsv1.DaemonSet{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "apps/v1",
@@ -213,9 +213,9 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
 	}
 }
 
-func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
+func NewKonnectivityServerStaticPod() *KonnectivityServerStaticPod {
 	udsHostPathType := corev1.HostPathDirectoryOrCreate
-	return &konnectivityServerStaticPod{
+	return &KonnectivityServerStaticPod{
 		StaticPod: corev1.Pod{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "v1",
@@ -333,8 +333,8 @@ func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
 	}
 }
 
-func NewEgressSelectorConfiguration() *egressSelectorConfiguration {
-	return &egressSelectorConfiguration{
+func NewEgressSelectorConfiguration() *EgressSelectorConfiguration {
+	return &EgressSelectorConfiguration{
 		EgressSelectorConfiguration: apiserver.EgressSelectorConfiguration{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "apiserver.k8s.io/v1beta1",
@@ -357,15 +357,15 @@ func NewEgressSelectorConfiguration() *egressSelectorConfiguration {
 	}
 }
 
-func (v *konnectivityAgents) Marshal() ([]byte, error) {
+func (v *KonnectivityAgents) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
 
-func (v *konnectivityServerStaticPod) Marshal() ([]byte, error) {
+func (v *KonnectivityServerStaticPod) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
 
-func (v *egressSelectorConfiguration) Marshal() ([]byte, error) {
+func (v *EgressSelectorConfiguration) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
 
@@ -22,7 +22,7 @@ func TestKonnectivityMarshalUnmarshal(t *testing.T) {
 	data, err := kmsDepl.Marshal()
 	require.NoError(err)
 
-	var recreated konnectivityAgents
+	var recreated KonnectivityAgents
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(kmsDepl, &recreated)
 }
@@ -21,7 +21,7 @@ const (
 	nodeMaintenanceOperatorCatalogNamespace = "olm"
 )
 
-type nodeMaintenanceOperatorDeployment struct {
+type NodeMaintenanceOperatorDeployment struct {
 	CatalogSource operatorsv1alpha1.CatalogSource
 	OperatorGroup operatorsv1.OperatorGroup
 	Subscription  operatorsv1alpha1.Subscription
@@ -29,8 +29,8 @@ type nodeMaintenanceOperatorDeployment struct {
 
 // NewNodeMaintenanceOperatorDeployment creates a new node maintenance operator (NMO) deployment.
 // See https://github.com/medik8s/node-maintenance-operator for more information.
-func NewNodeMaintenanceOperatorDeployment() *nodeMaintenanceOperatorDeployment {
-	return &nodeMaintenanceOperatorDeployment{
+func NewNodeMaintenanceOperatorDeployment() *NodeMaintenanceOperatorDeployment {
+	return &NodeMaintenanceOperatorDeployment{
 		CatalogSource: operatorsv1alpha1.CatalogSource{
 			TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
 			ObjectMeta: metav1.ObjectMeta{
@@ -80,6 +80,6 @@ func NewNodeMaintenanceOperatorDeployment() *nodeMaintenanceOperatorDeployment {
 	}
 }
 
-func (c *nodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
+func (c *NodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestNodeMaintenanceOperatorMarshalUnmarshal(t *testing.T) {
 	data, err := nmoDepl.Marshal()
 	require.NoError(err)
 
-	var recreated nodeMaintenanceOperatorDeployment
+	var recreated NodeMaintenanceOperatorDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(nmoDepl, &recreated)
 }
@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
 package resources
 
 import (
-	_ "embed"
 	"time"
 
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
@@ -31,7 +30,7 @@ var NodeOperatorCRDNames = []string{
 	"scalinggroups.update.edgeless.systems",
 }
 
-type nodeOperatorDeployment struct {
+type NodeOperatorDeployment struct {
 	CatalogSource operatorsv1alpha1.CatalogSource
 	OperatorGroup operatorsv1.OperatorGroup
 	Subscription  operatorsv1alpha1.Subscription
@@ -39,8 +38,8 @@ type nodeOperatorDeployment struct {
 
 // NewNodeOperatorDeployment creates a new constellation node operator deployment.
 // See /operators/constellation-node-operator for more information.
-func NewNodeOperatorDeployment(cloudProvider string, uid string) *nodeOperatorDeployment {
-	return &nodeOperatorDeployment{
+func NewNodeOperatorDeployment(cloudProvider string, uid string) *NodeOperatorDeployment {
+	return &NodeOperatorDeployment{
 		CatalogSource: operatorsv1alpha1.CatalogSource{
 			TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
 			ObjectMeta: metav1.ObjectMeta{
@@ -94,6 +93,6 @@ func NewNodeOperatorDeployment(cloudProvider string, uid string) *nodeOperatorDe
 	}
 }
 
-func (c *nodeOperatorDeployment) Marshal() ([]byte, error) {
+func (c *NodeOperatorDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestNodeOperatorMarshalUnmarshal(t *testing.T) {
 	data, err := nmoDepl.Marshal()
 	require.NoError(err)
 
-	var recreated nodeOperatorDeployment
+	var recreated NodeOperatorDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(nmoDepl, &recreated)
 }
@@ -21,13 +21,13 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type verificationDaemonset struct {
+type VerificationDaemonset struct {
 	DaemonSet    apps.DaemonSet
 	Service      k8s.Service
 	LoadBalancer k8s.Service
 }
 
-func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset {
+func NewVerificationDaemonSet(csp, loadBalancerIP string) *VerificationDaemonset {
 	var err error
 	if strings.Contains(loadBalancerIP, ":") {
 		loadBalancerIP, _, err = net.SplitHostPort(loadBalancerIP)
@@ -35,7 +35,7 @@ func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset
 			panic(err)
 		}
 	}
-	return &verificationDaemonset{
+	return &VerificationDaemonset{
 		DaemonSet: apps.DaemonSet{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "apps/v1",
@@ -188,6 +188,6 @@ func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset
 	}
 }
 
-func (v *verificationDaemonset) Marshal() ([]byte, error) {
+func (v *VerificationDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
@@ -19,7 +19,7 @@ func TestNewVerificationDaemonset(t *testing.T) {
 	deploymentYAML, err := deployment.Marshal()
 	require.NoError(t, err)
 
-	var recreated verificationDaemonset
+	var recreated VerificationDaemonset
 	require.NoError(t, kubernetes.UnmarshalK8SResources(deploymentYAML, &recreated))
 	assert.Equal(t, deployment, &recreated)
 }
@@ -77,7 +77,7 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
 // InitCluster initializes a new Kubernetes cluster and applies pod network provider.
 func (k *KubeWrapper) InitCluster(
 	ctx context.Context, cloudServiceAccountURI, versionString string, measurementSalt []byte, enforcedPCRs []uint32,
-	enforceIdKeyDigest bool, idKeyDigest []byte, azureCVM bool, kmsConfig resources.KMSConfig, sshUsers map[string]string,
+	enforceIDKeyDigest bool, idKeyDigest []byte, azureCVM bool, kmsConfig resources.KMSConfig, sshUsers map[string]string,
 	helmDeployments []byte, conformanceMode bool, log *logger.Logger,
 ) ([]byte, error) {
 	k8sVersion, err := versions.NewValidK8sVersion(versionString)
@@ -200,7 +200,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
 	}
 
-	if err := k.setupJoinService(k.cloudProvider, k.initialMeasurementsJSON, measurementSalt, enforcedPCRs, idKeyDigest, enforceIdKeyDigest); err != nil {
+	if err := k.setupJoinService(k.cloudProvider, k.initialMeasurementsJSON, measurementSalt, enforcedPCRs, idKeyDigest, enforceIDKeyDigest); err != nil {
 		return nil, fmt.Errorf("setting up join service failed: %w", err)
 	}
 
@@ -321,7 +321,7 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
 }
 
 func (k *KubeWrapper) setupJoinService(
-	csp string, measurementsJSON, measurementSalt []byte, enforcedPCRs []uint32, initialIdKeyDigest []byte, enforceIdKeyDigest bool,
+	csp string, measurementsJSON, measurementSalt []byte, enforcedPCRs []uint32, initialIDKeyDigest []byte, enforceIDKeyDigest bool,
 ) error {
 	enforcedPCRsJSON, err := json.Marshal(enforcedPCRs)
 	if err != nil {
@@ -329,7 +329,7 @@ func (k *KubeWrapper) setupJoinService(
 	}
 
 	joinConfiguration := resources.NewJoinServiceDaemonset(
-		csp, string(measurementsJSON), string(enforcedPCRsJSON), hex.EncodeToString(initialIdKeyDigest), strconv.FormatBool(enforceIdKeyDigest), measurementSalt,
+		csp, string(measurementsJSON), string(enforcedPCRsJSON), hex.EncodeToString(initialIDKeyDigest), strconv.FormatBool(enforceIDKeyDigest), measurementSalt,
 	)
 
 	return k.clusterUtil.SetupJoinService(k.client, joinConfiguration)
@@ -29,7 +29,7 @@ type Validator struct {
 	pcrs               map[uint32][]byte
 	enforcedPCRs       []uint32
 	idkeydigest        []byte
-	enforceIdKeyDigest bool
+	enforceIDKeyDigest bool
 	azureCVM           bool
 	validator          atls.Validator
 }
@@ -47,11 +47,11 @@ func NewValidator(provider cloudprovider.Provider, config *config.Config) (*Vali
 	if v.provider == cloudprovider.Azure {
 		v.azureCVM = *config.Provider.Azure.ConfidentialVM
 		if v.azureCVM {
-			idkeydigest, err := hex.DecodeString(config.Provider.Azure.IdKeyDigest)
+			idkeydigest, err := hex.DecodeString(config.Provider.Azure.IDKeyDigest)
 			if err != nil {
 				return nil, fmt.Errorf("bad config: decoding idkeydigest from config: %w", err)
 			}
-			v.enforceIdKeyDigest = *config.Provider.Azure.EnforceIdKeyDigest
+			v.enforceIDKeyDigest = *config.Provider.Azure.EnforceIDKeyDigest
 			v.idkeydigest = idkeydigest
 		}
 	}
@@ -146,7 +146,7 @@ func (v *Validator) updateValidator(cmd *cobra.Command) {
 		v.validator = gcp.NewValidator(v.pcrs, v.enforcedPCRs, log)
 	case cloudprovider.Azure:
 		if v.azureCVM {
-			v.validator = snp.NewValidator(v.pcrs, v.enforcedPCRs, v.idkeydigest, v.enforceIdKeyDigest, log)
+			v.validator = snp.NewValidator(v.pcrs, v.enforcedPCRs, v.idkeydigest, v.enforceIDKeyDigest, log)
 		} else {
 			v.validator = trustedlaunch.NewValidator(v.pcrs, v.enforcedPCRs, log)
 		}
@@ -39,8 +39,8 @@ func TestNewValidator(t *testing.T) {
 		provider           cloudprovider.Provider
 		config             *config.Config
 		pcrs               map[uint32][]byte
-		enforceIdKeyDigest bool
-		idkeydigest        string
+		enforceIDKeyDigest bool
+		idKeyDigest        string
 		azureCVM           bool
 		wantErr            bool
 	}{
@@ -80,14 +80,14 @@ func TestNewValidator(t *testing.T) {
 		"set idkeydigest": {
 			provider:           cloudprovider.Azure,
 			pcrs:               testPCRs,
-			idkeydigest:        "414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141",
-			enforceIdKeyDigest: true,
+			idKeyDigest:        "414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141",
+			enforceIDKeyDigest: true,
 		},
 		"invalid idkeydigest": {
 			provider:           cloudprovider.Azure,
 			pcrs:               testPCRs,
-			idkeydigest:        "41414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414",
-			enforceIdKeyDigest: true,
+			idKeyDigest:        "41414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414",
+			enforceIDKeyDigest: true,
 			azureCVM:           true,
 			wantErr:            true,
 		},
@@ -104,7 +104,7 @@ func TestNewValidator(t *testing.T) {
 			}
 			if tc.provider == cloudprovider.Azure {
 				measurements := config.Measurements(tc.pcrs)
-				conf.Provider.Azure = &config.AzureConfig{Measurements: measurements, EnforceIdKeyDigest: &tc.enforceIdKeyDigest, IdKeyDigest: tc.idkeydigest, ConfidentialVM: &tc.azureCVM}
+				conf.Provider.Azure = &config.AzureConfig{Measurements: measurements, EnforceIDKeyDigest: &tc.enforceIDKeyDigest, IDKeyDigest: tc.idKeyDigest, ConfidentialVM: &tc.azureCVM}
 			}
 			if tc.provider == cloudprovider.QEMU {
 				measurements := config.Measurements(tc.pcrs)
@@ -77,7 +77,7 @@ func create(cmd *cobra.Command, creator cloudCreator, fileHandler file.Handler)
 	if config.IsAzureNonCVM() {
 		cmd.Println("Disabling Confidential VMs is insecure. Use only for evaluation purposes.")
 		printedAWarning = true
-		if config.EnforcesIdKeyDigest() {
+		if config.EnforcesIDKeyDigest() {
 			cmd.Println("Your config asks for enforcing the idkeydigest. This is only available on Confidential VMs. It will not be enforced.")
 		}
 	}
@@ -138,7 +138,7 @@ func initialize(cmd *cobra.Command, newDialer func(validator *cloudcmd.Validator
 		SshUserKeys:        ssh.ToProtoSlice(sshUsers),
 		HelmDeployments:    helmDeployments,
 		EnforcedPcrs:       getEnforcedMeasurements(provider, config),
-		EnforceIdkeydigest: getEnforceIdKeyDigest(provider, config),
+		EnforceIdkeydigest: getEnforceIDKeyDigest(provider, config),
 		ConformanceMode:    flags.conformance,
 	}
 	resp, err := initCall(cmd.Context(), newDialer(validator), flags.endpoint, req)
@@ -237,10 +237,10 @@ func getEnforcedMeasurements(provider cloudprovider.Provider, config *config.Con
 	}
 }
 
-func getEnforceIdKeyDigest(provider cloudprovider.Provider, config *config.Config) bool {
+func getEnforceIDKeyDigest(provider cloudprovider.Provider, config *config.Config) bool {
 	switch provider {
 	case cloudprovider.Azure:
-		return *config.Provider.Azure.EnforceIdKeyDigest
+		return *config.Provider.Azure.EnforceIDKeyDigest
 	default:
 		return false
 	}
@@ -76,7 +76,7 @@ func main() {
 	}
 	sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, ssh, download)
 	serv := server.New(log.Named("server"), ssh, serviceManager, streamer)
-	if err := deploy.DeployDefaultServiceUnit(ctx, serviceManager); err != nil {
+	if err := deploy.DefaultServiceUnit(ctx, serviceManager); err != nil {
 		log.Fatalf("%s", err)
 	}
 
@@ -179,13 +179,13 @@ type fakeDownloadServer struct {
 	pb.UnimplementedDebugdServer
 }
 
-func (f *fakeDownloadServer) DownloadBootstrapper(request *pb.DownloadBootstrapperRequest, stream pb.Debugd_DownloadBootstrapperServer) error {
-	for _, chunk := range f.chunks {
+func (s *fakeDownloadServer) DownloadBootstrapper(request *pb.DownloadBootstrapperRequest, stream pb.Debugd_DownloadBootstrapperServer) error {
+	for _, chunk := range s.chunks {
 		if err := stream.Send(&pb.Chunk{Content: chunk}); err != nil {
 			return fmt.Errorf("sending chunk: %w", err)
 		}
 	}
-	return f.downladErr
+	return s.downladErr
 }
 
 func (s *fakeDownloadServer) DownloadAuthorizedKeys(context.Context, *pb.DownloadAuthorizedKeysRequest) (*pb.DownloadAuthorizedKeysResponse, error) {
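This hunk is the receiver-naming fix: revive expects all methods of a type to use the same receiver name. A generic sketch of what the rule flags (hypothetical type, not from the repository):

type downloadServer struct{}

func (f *downloadServer) A() {} // flagged: receiver named f here...
func (s *downloadServer) B() {} // ...but s in the sibling method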
@@ -146,8 +146,8 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU
 	return nil
 }
 
-// DeployDefaultServiceUnit will write the default "bootstrapper.service" unit file.
-func DeployDefaultServiceUnit(ctx context.Context, serviceManager *ServiceManager) error {
+// DefaultServiceUnit will write the default "bootstrapper.service" unit file.
+func DefaultServiceUnit(ctx context.Context, serviceManager *ServiceManager) error {
 	if err := serviceManager.WriteSystemdUnitFile(ctx, SystemdUnit{
 		Name:     debugd.BootstrapperSystemdUnitName,
 		Contents: debugd.BootstrapperSystemdUnitContents,
@@ -130,19 +130,19 @@ func (s *RecoveryServer) Recover(stream recoverproto.API_RecoverServer) error {
 	return nil
 }
 
-// stubServer implements the RecoveryServer interface but does not actually start a server.
-type stubServer struct {
+// StubServer implements the RecoveryServer interface but does not actually start a server.
+type StubServer struct {
 	log *logger.Logger
 }
 
 // NewStub returns a new stubbed RecoveryServer.
 // We use this to avoid having to start a server for worker nodes, since they don't require manual recovery.
-func NewStub(log *logger.Logger) *stubServer {
-	return &stubServer{log: log}
+func NewStub(log *logger.Logger) *StubServer {
+	return &StubServer{log: log}
 }
 
 // Serve waits until the context is canceled and returns nil.
-func (s *stubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) {
+func (s *StubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) {
 	s.log.Infof("Running as worker node, skipping recovery server")
 	<-ctx.Done()
 	return nil, nil, ctx.Err()
@@ -38,8 +38,8 @@ const (
 	stateInfoPath = stateDiskMountPath + "/constellation/node_state.json"
 )
 
-// SetupManager handles formatting, mapping, mounting and unmounting of state disks.
-type SetupManager struct {
+// Manager handles formatting, mapping, mounting and unmounting of state disks.
+type Manager struct {
 	log      *logger.Logger
 	csp      string
 	diskPath string
@@ -53,8 +53,8 @@ type SetupManager struct {
 // New initializes a SetupManager with the given parameters.
 func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero,
 	mapper DeviceMapper, mounter Mounter, openTPM vtpm.TPMOpenFunc,
-) *SetupManager {
-	return &SetupManager{
+) *Manager {
+	return &Manager{
 		log:      log,
 		csp:      csp,
 		diskPath: diskPath,
@@ -68,7 +68,7 @@ func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero,
 
 // PrepareExistingDisk requests and waits for a decryption key to remap the encrypted state disk.
 // Once the disk is mapped, the function taints the node as initialized by updating it's PCRs.
-func (s *SetupManager) PrepareExistingDisk(recover RecoveryDoer) error {
+func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error {
 	s.log.Infof("Preparing existing state disk")
 	uuid := s.mapper.DiskUUID()
 
@@ -113,7 +113,7 @@ func (s *SetupManager) PrepareExistingDisk(recover RecoveryDoer) error {
 }
 
 // PrepareNewDisk prepares an instances state disk by formatting the disk as a LUKS device using a random passphrase.
-func (s *SetupManager) PrepareNewDisk() error {
+func (s *Manager) PrepareNewDisk() error {
 	s.log.Infof("Preparing new state disk")
 
 	// generate and save temporary passphrase
@@ -132,7 +132,7 @@ func (s *SetupManager) PrepareNewDisk() error {
 	return s.mapper.MapDisk(stateDiskMappedName, string(passphrase))
 }
 
-func (s *SetupManager) readMeasurementSalt(path string) ([]byte, error) {
+func (s *Manager) readMeasurementSalt(path string) ([]byte, error) {
 	handler := file.NewHandler(s.fs)
 	var state nodestate.NodeState
 	if err := handler.ReadJSON(path, &state); err != nil {
@@ -147,7 +147,7 @@ func (s *SetupManager) readMeasurementSalt(path string) ([]byte, error) {
 }
 
 // saveConfiguration saves the given passphrase and cryptsetup mapping configuration to disk.
-func (s *SetupManager) saveConfiguration(passphrase []byte) error {
+func (s *Manager) saveConfiguration(passphrase []byte) error {
 	// passphrase
 	if err := s.fs.MkdirAll(keyPath, os.ModePerm); err != nil {
 		return err
@@ -168,14 +168,14 @@ type RejoinClient interface {
 	Start(context.Context, string) (key, secret []byte)
 }
 
-type nodeRecoverer struct {
+type NodeRecoverer struct {
 	recoveryServer RecoveryServer
 	rejoinClient   RejoinClient
 }
 
 // NewNodeRecoverer initializes a new nodeRecoverer.
-func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient) *nodeRecoverer {
-	return &nodeRecoverer{
+func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient) *NodeRecoverer {
+	return &NodeRecoverer{
 		recoveryServer: recoveryServer,
 		rejoinClient:   rejoinClient,
 	}
@@ -184,7 +184,7 @@ func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient)
 // Do performs a recovery procedure on the given state disk.
 // The method starts a gRPC server to allow manual recovery by a user.
 // At the same time it tries to request a decryption key from all available Constellation control-plane nodes.
-func (r *nodeRecoverer) Do(uuid, endpoint string) (passphrase, measurementSecret []byte, err error) {
+func (r *NodeRecoverer) Do(uuid, endpoint string) (passphrase, measurementSecret []byte, err error) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	lis, err := net.Listen("tcp", endpoint)
@@ -135,7 +135,7 @@ func TestPrepareExistingDisk(t *testing.T) {
 		require.NoError(t, handler.WriteJSON(stateInfoPath, nodestate.NodeState{MeasurementSalt: salt}, file.OptMkdirAll))
 	}
 
-	setupManager := &SetupManager{
+	setupManager := &Manager{
 		log:      logger.NewTest(t),
 		csp:      "test",
 		diskPath: "disk-path",
@@ -213,7 +213,7 @@ func TestPrepareNewDisk(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			assert := assert.New(t)
 
-			setupManager := &SetupManager{
+			setupManager := &Manager{
 				log:      logger.NewTest(t),
 				csp:      "test",
 				diskPath: "disk-path",
@@ -16,9 +16,8 @@ import (
 // NewIssuer returns an SNP issuer if it can successfully read the idkeydigest from the TPM.
 // Otherwise returns a Trusted Launch issuer.
 func NewIssuer() atls.Issuer {
-	if _, err := snp.GetIdKeyDigest(vtpm.OpenVTPM); err == nil {
+	if _, err := snp.GetIDKeyDigest(vtpm.OpenVTPM); err == nil {
 		return snp.NewIssuer()
-	} else {
-		return trustedlaunch.NewIssuer()
 	}
+	return trustedlaunch.NewIssuer()
 }
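This hunk is one of the commit's indent-error-flow fixes: when the if branch terminates with a return, revive prefers an early return over an else block. A generic before/after illustration of the rule, not code from this repository:

// Flagged by revive (indent-error-flow): the else only indents the fallback.
func issuerVerbose(snpAvailable bool) string {
    if snpAvailable {
        return "snp"
    } else {
        return "trusted-launch"
    }
}

// Preferred: return early and drop the else.
func issuerFlat(snpAvailable bool) string {
    if snpAvailable {
        return "snp"
    }
    return "trusted-launch"
}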
@@ -28,8 +28,8 @@ const (
 	tpmAkIdx = 0x81000003
 )

-// GetIdKeyDigest reads the idkeydigest from the snp report saved in the TPM's non-volatile memory.
-func GetIdKeyDigest(open vtpm.TPMOpenFunc) ([]byte, error) {
+// GetIDKeyDigest reads the idkeydigest from the snp report saved in the TPM's non-volatile memory.
+func GetIDKeyDigest(open vtpm.TPMOpenFunc) ([]byte, error) {
 	tpm, err := open()
 	if err != nil {
 		return nil, err
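Callers of the renamed function follow the pattern visible elsewhere in this diff; a minimal sketch, assuming the snp and vtpm packages and a log variable are in scope:

// Sketch: read the SEV-SNP idkeydigest through the vTPM. On Trusted Launch
// VMs there is no SNP report in NV memory, so the read fails and callers
// fall back to the Trusted Launch attestation path.
digest, err := snp.GetIDKeyDigest(vtpm.OpenVTPM)
if err != nil {
    log.Printf("no SNP report available, assuming Trusted Launch: %v", err)
    return
}
log.Printf("idkeydigest: %x", digest)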
@@ -75,7 +75,7 @@ func NewIssuer() *Issuer {
 // The attestation report is loaded from the TPM, the certificate chain is queried
 // from the cloud metadata API.
 // [1] https://github.com/AMDESE/sev-guest/blob/main/include/attestation.h
-func getInstanceInfo(reportGetter tpmReportGetter, imdsAPI imdsApi) func(tpm io.ReadWriteCloser) ([]byte, error) {
+func getInstanceInfo(reportGetter tpmReportGetter, imdsapi imdsAPI) func(tpm io.ReadWriteCloser) ([]byte, error) {
 	return func(tpm io.ReadWriteCloser) ([]byte, error) {
 		hclReport, err := reportGetter.get(tpm)
 		if err != nil {
@@ -88,7 +88,7 @@ func getInstanceInfo(reportGetter tpmReportGetter, imdsAPI imdsApi) func(tpm io.

 		runtimeData, _, _ := bytes.Cut(hclReport[lenSnpReport+lenSnpReportRuntimeDataPadding:], []byte{0})

-		vcekResponse, err := imdsAPI.getVcek(context.TODO())
+		vcekResponse, err := imdsapi.getVcek(context.TODO())
 		if err != nil {
 			return nil, fmt.Errorf("getVcekFromIMDS: %w", err)
 		}
@@ -128,6 +128,6 @@ type tpmReportGetter interface {
 	get(tpm io.ReadWriteCloser) ([]byte, error)
 }

-type imdsApi interface {
+type imdsAPI interface {
 	getVcek(ctx context.Context) (vcekResponse, error)
 }
@@ -67,7 +67,7 @@ func TestGetSNPAttestation(t *testing.T) {
 			err: nil,
 		}

-		attestationJson, err := getInstanceInfo(&snpAttestationReport, imdsClient)(tpm)
+		attestationJSON, err := getInstanceInfo(&snpAttestationReport, imdsClient)(tpm)
 		if tc.wantErr {
 			assert.Error(err)
 			return
@@ -75,7 +75,7 @@ func TestGetSNPAttestation(t *testing.T) {
 		assert.NoError(err)

 		var instanceInfo azureInstanceInfo
-		err = json.Unmarshal(attestationJson, &instanceInfo)
+		err = json.Unmarshal(attestationJSON, &instanceInfo)
 		assert.NoError(err)

 		if tc.wantErr {
@@ -159,16 +159,16 @@ func validateSNPReport(cert *x509.Certificate, expectedIDKeyDigest []byte, enfor
 		return fmt.Errorf("mismatching vcek extensions: %w", err)
 	}

-	sig_r := report.Signature.R[:]
-	sig_s := report.Signature.S[:]
+	sigR := report.Signature.R[:]
+	sigS := report.Signature.S[:]

 	// Table 107 in https://www.amd.com/system/files/TechDocs/56860.pdf mentions little endian signature components.
 	// They come out of the certificate as big endian.
-	reverseEndian(sig_r)
-	reverseEndian(sig_s)
+	reverseEndian(sigR)
+	reverseEndian(sigS)

-	rParam := new(big.Int).SetBytes(sig_r)
-	sParam := new(big.Int).SetBytes(sig_s)
+	rParam := new(big.Int).SetBytes(sigR)
+	sParam := new(big.Int).SetBytes(sigS)
 	sequence := ecdsaSig{rParam, sParam}
 	sigEncoded, err := asn1.Marshal(sequence)
 	if err != nil {
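Background for this hunk: the renamed sigR and sigS hold the r and s components of the SNP report's ECDSA signature, which arrive little endian and must be byte-reversed before feeding big.Int and ASN.1 encoding. A self-contained sketch of that conversion; reverse and ecdsaSignature are local stand-ins for the repository's reverseEndian and ecdsaSig:

package main

import (
    "encoding/asn1"
    "fmt"
    "math/big"
)

// ecdsaSignature mirrors the r/s SEQUENCE layout used by crypto/ecdsa;
// it stands in for the ecdsaSig type referenced in the diff.
type ecdsaSignature struct {
    R, S *big.Int
}

// reverse flips a byte slice in place, converting the little-endian
// components from the SNP report into big-endian for big.Int.
func reverse(b []byte) {
    for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
        b[i], b[j] = b[j], b[i]
    }
}

func main() {
    // Toy little-endian components; the real report carries larger,
    // zero-padded values.
    sigR := []byte{0x04, 0x03, 0x02, 0x01}
    sigS := []byte{0x08, 0x07, 0x06, 0x05}
    reverse(sigR)
    reverse(sigS)

    der, err := asn1.Marshal(ecdsaSignature{
        R: new(big.Int).SetBytes(sigR),
        S: new(big.Int).SetBytes(sigS),
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("DER-encoded signature: %x\n", der)
}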
File diff suppressed because one or more lines are too long
@@ -135,10 +135,10 @@ type AzureConfig struct {
 	EnforcedMeasurements []uint32 `yaml:"enforcedMeasurements"`
 	// description: |
 	// Expected value for the field 'idkeydigest' in the AMD SEV-SNP attestation report. Only usable with ConfidentialVMs. See 4.6 and 7.3 in: https://www.amd.com/system/files/TechDocs/56860.pdf
-	IdKeyDigest string `yaml:"idKeyDigest" validate:"required_if=EnforceIdKeyDigest true,omitempty,hexadecimal,len=96"`
+	IDKeyDigest string `yaml:"idKeyDigest" validate:"required_if=EnforceIdKeyDigest true,omitempty,hexadecimal,len=96"`
 	// description: |
 	// Enforce the specified idKeyDigest value during remote attestation.
-	EnforceIdKeyDigest *bool `yaml:"enforceIdKeyDigest" validate:"required"`
+	EnforceIDKeyDigest *bool `yaml:"enforceIdKeyDigest" validate:"required"`
 	// description: |
 	// Use Confidential VMs. If set to false, Trusted Launch VMs are used instead. See: https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview
 	ConfidentialVM *bool `yaml:"confidentialVM" validate:"required"`
@@ -223,8 +223,8 @@ func Default() *Config {
 			StateDiskType: "Premium_LRS",
 			Measurements: copyPCRMap(azurePCRs),
 			EnforcedMeasurements: []uint32{4, 8, 9, 11, 12},
-			IdKeyDigest: "57486a447ec0f1958002a22a06b7673b9fd27d11e1c6527498056054c5fa92d23c50f9de44072760fe2b6fb89740b696",
-			EnforceIdKeyDigest: func() *bool { b := true; return &b }(),
+			IDKeyDigest: "57486a447ec0f1958002a22a06b7673b9fd27d11e1c6527498056054c5fa92d23c50f9de44072760fe2b6fb89740b696",
+			EnforceIDKeyDigest: func() *bool { b := true; return &b }(),
 			ConfidentialVM: func() *bool { b := true; return &b }(),
 		},
 		GCP: &GCPConfig{
@@ -509,8 +509,8 @@ func (c *Config) IsAzureNonCVM() bool {
 	return c.Provider.Azure != nil && c.Provider.Azure.ConfidentialVM != nil && !*c.Provider.Azure.ConfidentialVM
 }

-func (c *Config) EnforcesIdKeyDigest() bool {
-	return c.Provider.Azure != nil && c.Provider.Azure.EnforceIdKeyDigest != nil && *c.Provider.Azure.EnforceIdKeyDigest
+func (c *Config) EnforcesIDKeyDigest() bool {
+	return c.Provider.Azure != nil && c.Provider.Azure.EnforceIDKeyDigest != nil && *c.Provider.Azure.EnforceIDKeyDigest
 }

 // FromFile returns config file with `name` read from `fileHandler` by parsing
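A small sketch of how the renamed accessor reads at a call site; the config package name and the fmt usage are assumptions, only Default and EnforcesIDKeyDigest appear in this diff:

// Sketch: decide whether the validator must enforce the idkeydigest.
cfg := config.Default()
if cfg.EnforcesIDKeyDigest() {
    // IDKeyDigest is the expected 96-character hex digest from the config.
    fmt.Printf("enforcing idkeydigest %s\n", cfg.Provider.Azure.IDKeyDigest)
}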
@@ -81,10 +81,10 @@ const (
 	MeasurementSaltFilename = "measurementSalt"
 	// MeasurementSecretFilename is the filename of the secret used in creation of the clusterID.
 	MeasurementSecretFilename = "measurementSecret"
-	// IdKeyDigestFilename is the name of the file holding the currently enforced idkeydigest.
-	IdKeyDigestFilename = "idkeydigest"
-	// EnforceIdKeyDigestFilename is the name of the file configuring whether idkeydigest is enforced or not.
-	EnforceIdKeyDigestFilename = "enforceIdKeyDigest"
+	// IDKeyDigestFilename is the name of the file holding the currently enforced idkeydigest.
+	IDKeyDigestFilename = "idkeydigest"
+	// EnforceIDKeyDigestFilename is the name of the file configuring whether idkeydigest is enforced or not.
+	EnforceIDKeyDigestFilename = "enforceIdKeyDigest"
 	// AzureCVM is the name of the file indicating whether the cluster is expected to run on CVMs or not.
 	AzureCVM = "azureCVM"
 	// K8sVersion is the filename of the mapped "k8s-version" configMap file.
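These filenames are consumed by joining them onto constants.ServiceBasePath, as the Updatable.Update hunk further down shows; a minimal sketch of the read-and-parse step, with fileHandler as a hypothetical stand-in for the repository's file handler:

// Sketch: load the enforcement flag from its mapped file.
raw, err := fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename))
if err != nil {
    return err
}
enforce, err := strconv.ParseBool(string(raw))
if err != nil {
    return fmt.Errorf("parsing enforceIdKeyDigest flag %q: %w", raw, err)
}
_ = enforce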
@@ -58,15 +58,20 @@ const (
 	NodeMaintenanceOperatorVersion = "v0.13.1-alpha1"

 	// currently supported versions.
-	V1_22 ValidK8sVersion = "1.22"
-	V1_23 ValidK8sVersion = "1.23"
-	V1_24 ValidK8sVersion = "1.24"
-	V1_25 ValidK8sVersion = "1.25"
+	//nolint:revive
+	V1_22 ValidK8sVersion = "1.22"
+	//nolint:revive
+	V1_23 ValidK8sVersion = "1.23"
+	//nolint:revive
+	V1_24 ValidK8sVersion = "1.24"
+	//nolint:revive
+	V1_25 ValidK8sVersion = "1.25"

 	Default ValidK8sVersion = V1_23
 )

 // versionConfigs holds download URLs for all required kubernetes components for every supported version.
-var VersionConfigs map[ValidK8sVersion]KubernetesVersion = map[ValidK8sVersion]KubernetesVersion{
+var VersionConfigs = map[ValidK8sVersion]KubernetesVersion{
 	V1_22: {
 		PatchVersion: "1.22.12",
 		CNIPluginsURL: "https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz",
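Dropping the explicit map type from the declaration is purely cosmetic (revive's var-declaration check); lookups are unchanged. A small sketch, assuming the package is imported as versions:

// Sketch: resolve the component downloads for one supported version.
cfg, ok := versions.VersionConfigs[versions.V1_23]
if !ok {
    log.Fatalf("unsupported Kubernetes version %q", versions.V1_23)
}
log.Printf("CNI plugins: %s", cfg.CNIPluginsURL)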
@@ -108,21 +108,21 @@ func (u *Updatable) Update() error {
 	u.log.Debugf("Enforced PCRs: %v", enforced)

 	var idkeydigest []byte
-	var enforceIdKeyDigest bool
+	var enforceIDKeyDigest bool
 	if u.csp == cloudprovider.Azure && u.azureCVM {
 		u.log.Infof("Updating encforceIdKeyDigest value")
-		enforceRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename))
+		enforceRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename))
 		if err != nil {
 			return err
 		}
-		enforceIdKeyDigest, err = strconv.ParseBool(string(enforceRaw))
+		enforceIDKeyDigest, err = strconv.ParseBool(string(enforceRaw))
 		if err != nil {
 			return fmt.Errorf("parsing content of EnforceIdKeyDigestFilename: %s: %w", enforceRaw, err)
 		}
-		u.log.Debugf("New encforceIdKeyDigest value: %v", enforceIdKeyDigest)
+		u.log.Debugf("New encforceIdKeyDigest value: %v", enforceIDKeyDigest)

 		u.log.Infof("Updating expected idkeydigest")
-		idkeydigestRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename))
+		idkeydigestRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename))
 		if err != nil {
 			return err
 		}
@@ -133,7 +133,7 @@ func (u *Updatable) Update() error {
 		u.log.Debugf("New idkeydigest: %x", idkeydigest)
 	}

-	u.Validator = u.newValidator(measurements, enforced, idkeydigest, enforceIdKeyDigest, u.log)
+	u.Validator = u.newValidator(measurements, enforced, idkeydigest, enforceIDKeyDigest, u.log)

 	return nil
 }
@@ -84,11 +84,11 @@ func TestNewUpdateableValidator(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -145,11 +145,11 @@ func TestUpdate(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -215,11 +215,11 @@ func TestUpdateConcurrency(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -58,7 +58,7 @@ func main() {
 	// set up Key Management Service
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
-	conKMS, err := setup.SetUpKMS(ctx, setup.NoStoreURI, keyURI)
+	conKMS, err := setup.KMS(ctx, setup.NoStoreURI, keyURI)
 	if err != nil {
 		log.With(zap.Error(err)).Fatalf("Failed to setup KMS")
 	}
@@ -13,25 +13,25 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/crypto"
 )

-// ClusterKMS implements the kms.CloudKMS interface for in cluster key management.
-type ClusterKMS struct {
+// KMS implements the kms.CloudKMS interface for in cluster key management.
+type KMS struct {
 	masterKey []byte
 	salt []byte
 }

 // New creates a new ClusterKMS.
-func New(salt []byte) *ClusterKMS {
-	return &ClusterKMS{salt: salt}
+func New(salt []byte) *KMS {
+	return &KMS{salt: salt}
 }

 // CreateKEK sets the ClusterKMS masterKey.
-func (c *ClusterKMS) CreateKEK(ctx context.Context, keyID string, kek []byte) error {
+func (c *KMS) CreateKEK(ctx context.Context, keyID string, kek []byte) error {
 	c.masterKey = kek
 	return nil
 }

 // GetDEK derives a key from the KMS masterKey.
-func (c *ClusterKMS) GetDEK(ctx context.Context, kekID string, dekID string, dekSize int) ([]byte, error) {
+func (c *KMS) GetDEK(ctx context.Context, kekID string, dekID string, dekSize int) ([]byte, error) {
 	if len(c.masterKey) == 0 {
 		return nil, errors.New("master key not set for Constellation KMS")
 	}
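The renamed type keeps its two-step contract: CreateKEK stores the master key, GetDEK derives per-purpose keys from it. A minimal usage sketch, assuming the package is imported as clusterkms and ctx, salt, and masterKey are in scope:

// Sketch: derive a 32-byte data encryption key from the cluster master key.
kms := clusterkms.New(salt)
if err := kms.CreateKEK(ctx, "", masterKey); err != nil {
    return err
}
dek, err := kms.GetDEK(ctx, "", "state-disk-key", 32)
if err != nil {
    return err
}
_ = dek // e.g. hand the DEK to the disk mapper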
@@ -40,8 +40,8 @@ type KMSInformation struct {
 	KeyEncryptionKeyID string
 }

-// SetUpKMS creates a KMS and key store from the given parameters.
-func SetUpKMS(ctx context.Context, storageURI, kmsURI string) (kms.CloudKMS, error) {
+// KMS creates a KMS and key store from the given parameters.
+func KMS(ctx context.Context, storageURI, kmsURI string) (kms.CloudKMS, error) {
 	store, err := getStore(ctx, storageURI)
 	if err != nil {
 		return nil, err
@@ -131,11 +131,11 @@ func TestGetKMS(t *testing.T) {
 func TestSetUpKMS(t *testing.T) {
 	assert := assert.New(t)

-	kms, err := SetUpKMS(context.Background(), "storage://unknown", "kms://unknown")
+	kms, err := KMS(context.Background(), "storage://unknown", "kms://unknown")
 	assert.Error(err)
 	assert.Nil(kms)

-	kms, err = SetUpKMS(context.Background(), "storage://no-store", "kms://cluster-kms?salt="+base64.URLEncoding.EncodeToString([]byte("salt")))
+	kms, err = KMS(context.Background(), "storage://no-store", "kms://cluster-kms?salt="+base64.URLEncoding.EncodeToString([]byte("salt")))
 	assert.NoError(err)
 	assert.NotNil(kms)
 }
@@ -32,9 +32,8 @@ func (c *Client) GetNodeImage(ctx context.Context, providerID string) (string, e
 	}
 	if resp.Properties.StorageProfile.ImageReference.ID != nil {
 		return *resp.Properties.StorageProfile.ImageReference.ID, nil
-	} else {
-		return *resp.Properties.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 	}
+	return *resp.Properties.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 }

 // GetScalingGroupID returns the scaling group ID of the node.
@@ -222,7 +222,7 @@ func TestCreateNode(t *testing.T) {
 			list: tc.preexistingVMs,
 			fetchErr: tc.fetchErr,
 		}
-		poller := NewStubCapacityPoller(tc.pollErr)
+		poller := newStubCapacityPoller(tc.pollErr)
 		client := Client{
 			virtualMachineScaleSetVMsAPI: &stubvirtualMachineScaleSetVMsAPI{
 				pager: pager,
@@ -357,7 +357,7 @@ type stubCapacityPoller struct {
 	doneC chan struct{}
 }

-func NewStubCapacityPoller(pollErr error) *stubCapacityPoller {
+func newStubCapacityPoller(pollErr error) *stubCapacityPoller {
 	return &stubCapacityPoller{
 		pollErr: pollErr,
 		pollC: make(chan struct{}),
@@ -34,9 +34,8 @@ func (c *Client) GetScalingGroupImage(ctx context.Context, scalingGroupID string
 	}
 	if res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.ID != nil {
 		return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.ID, nil
-	} else {
-		return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 	}
+	return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 }

 // SetScalingGroupImage sets the image URI of the scaling group.