Mirror of https://github.com/edgelesssys/constellation.git
Feat/revive (#212)

* enable revive as linter
* fix var-naming revive issues
* fix blank-imports revive issues
* fix receiver-naming revive issues
* fix exported revive issues
* fix indent-error-flow revive issues
* fix unexported-return revive issues
* fix indent-error-flow revive issues

Signed-off-by: Fabian Kammel <fk@edgeless.systems>

parent 2e93b354e4
commit 369480a50b
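Most of the renames in this diff follow from revive's var-naming rule: Go initialisms such as ID, JSON, and API are written in a single case, and identifiers must not contain underscores. A minimal sketch of the pattern the commit applies throughout (names here are illustrative, not taken from the diff):

    package naming

    // Flagged by var-naming:          Fixed:
    //   GetIdKeyDigest                GetIDKeyDigest
    //   attestationJson               attestationJSON
    //   sig_r, sig_s                  sigR, sigS

    // GetIDKeyDigest is a placeholder showing the corrected style.
    func GetIDKeyDigest() []byte {
        sigR := []byte{0x41} // was sig_r; underscores in names are flagged
        return sigR
    }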
@@ -27,6 +27,7 @@ linters:
   - gofumpt
   - misspell
   - noctx
+  - revive
   - tenv
   - unconvert
   - unparam
@@ -115,7 +115,7 @@ func main() {
 		log.With(zap.Error(err)).Fatalf("Failed to get selected PCRs")
 	}
 
-	if idkeydigest, err := snp.GetIdKeyDigest(vtpm.OpenVTPM); err == nil {
+	if idkeydigest, err := snp.GetIDKeyDigest(vtpm.OpenVTPM); err == nil {
 		issuer = initserver.NewIssuerWrapper(snp.NewIssuer(), vmtype.AzureCVM, idkeydigest)
 	} else {
 		// assume we are running in a trusted-launch VM
@@ -10,7 +10,7 @@ import (
 	"sync"
 )
 
-type cleaner struct {
+type Cleaner struct {
 	stoppers  []stopper
 	stopC     chan struct{}
 	startOnce sync.Once
@@ -18,8 +18,8 @@ type cleaner struct {
 }
 
 // New creates a new cleaner.
-func New(stoppers ...stopper) *cleaner {
-	res := &cleaner{
+func New(stoppers ...stopper) *Cleaner {
+	res := &Cleaner{
 		stoppers: stoppers,
 		stopC:    make(chan struct{}, 1),
 	}
@@ -28,13 +28,13 @@ func New(stoppers ...stopper) *cleaner {
 }
 
 // With adds a new stopper to the cleaner.
-func (c *cleaner) With(stopper stopper) *cleaner {
+func (c *Cleaner) With(stopper stopper) *Cleaner {
 	c.stoppers = append(c.stoppers, stopper)
 	return c
 }
 
 // Start blocks until it receives a stop message, stops all services gracefully and returns.
-func (c *cleaner) Start() {
+func (c *Cleaner) Start() {
 	c.startOnce.Do(func() {
 		defer c.wg.Done()
 		// wait for the stop message
@@ -51,7 +51,7 @@ func (c *cleaner) Start() {
 }
 
 // Clean initiates the cleanup but does not wait for it to complete.
-func (c *cleaner) Clean() {
+func (c *Cleaner) Clean() {
 	// try to enqueue the stop message once
 	// if the channel is full, the message is dropped
 	select {
@@ -61,7 +61,7 @@ func (c *cleaner) Clean() {
 }
 
 // Done waits for the cleanup to complete.
-func (c *cleaner) Done() {
+func (c *Cleaner) Done() {
 	c.wg.Wait()
}
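Exporting cleaner as Cleaner also resolves revive's unexported-return rule: the exported constructor New returned the unexported *cleaner, a type callers outside the package could not name. A hedged usage sketch of the exported API (the import path and the noop stopper are assumptions for illustration, not taken from the repository):

    package main

    import clean "example.com/constellation/bootstrapper/internal/clean" // path assumed

    // noopStopper is a hypothetical stopper; the package's stopper
    // interface is assumed to expose a Stop method.
    type noopStopper struct{}

    func (noopStopper) Stop() {}

    func main() {
        c := clean.New(noopStopper{}).With(noopStopper{})
        go c.Start() // blocks until a stop message arrives
        c.Clean()    // enqueue the stop message (non-blocking)
        c.Done()     // wait for all stoppers to shut down
    }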
@@ -130,7 +130,7 @@ func (s *Server) Init(ctx context.Context, req *initproto.InitRequest) (*initpro
 		measurementSalt,
 		req.EnforcedPcrs,
 		req.EnforceIdkeydigest,
-		s.issuerWrapper.IdKeyDigest(),
+		s.issuerWrapper.IDKeyDigest(),
 		s.issuerWrapper.VMType() == vmtype.AzureCVM,
 		resources.KMSConfig{
 			MasterSecret: req.MasterSecret,
@@ -199,7 +199,7 @@ func (i *IssuerWrapper) VMType() vmtype.VMType {
 	return i.vmType
 }
 
-func (i *IssuerWrapper) IdKeyDigest() []byte {
+func (i *IssuerWrapper) IDKeyDigest() []byte {
 	return i.idkeydigest
 }
 
@@ -237,7 +237,7 @@ type ClusterInitializer interface {
 		k8sVersion string,
 		measurementSalt []byte,
 		enforcedPcrs []uint32,
-		enforceIdKeyDigest bool,
+		enforceIDKeyDigest bool,
 		idKeyDigest []byte,
 		azureCVM bool,
 		kmsConfig resources.KMSConfig,
@@ -18,8 +18,8 @@ import (
 
 const accessManagerNamespace = "kube-system"
 
-// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
-type accessManagerDeployment struct {
+// AccessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
+type AccessManagerDeployment struct {
 	ConfigMap      k8s.ConfigMap
 	ServiceAccount k8s.ServiceAccount
 	Role           rbac.Role
@@ -28,8 +28,8 @@ type accessManagerDeployment struct {
 }
 
 // NewAccessManagerDeployment creates a new *accessManagerDeployment which manages the SSH users for the cluster.
-func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeployment {
-	return &accessManagerDeployment{
+func NewAccessManagerDeployment(sshUsers map[string]string) *AccessManagerDeployment {
+	return &AccessManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: v1.TypeMeta{
 				APIVersion: "v1",
@@ -198,6 +198,6 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
 }
 
 // Marshal marshals the access-manager deployment as YAML documents.
-func (c *accessManagerDeployment) Marshal() ([]byte, error) {
+func (c *AccessManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -28,7 +28,7 @@ func TestAccessManagerMarshalUnmarshal(t *testing.T) {
 	data, err := accessManagerDeplNil.Marshal()
 	require.NoError(err)
 
-	var recreated accessManagerDeployment
+	var recreated AccessManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(accessManagerDeplNil, &recreated)
@@ -16,7 +16,7 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type cloudControllerManagerDeployment struct {
+type CloudControllerManagerDeployment struct {
 	ServiceAccount     k8s.ServiceAccount
 	ClusterRoleBinding rbac.ClusterRoleBinding
 	DaemonSet          apps.DaemonSet
@@ -27,7 +27,7 @@ type cloudControllerManagerDeployment struct {
 // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager
 
 // NewDefaultCloudControllerManagerDeployment creates a new *cloudControllerManagerDeployment, customized for the CSP.
-func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *cloudControllerManagerDeployment {
+func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podCIDR string, extraArgs []string, extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar) *CloudControllerManagerDeployment {
 	command := []string{
 		path,
 		fmt.Sprintf("--cloud-provider=%s", cloudProvider),
@@ -76,7 +76,7 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podC
 	}
 	volumeMounts = append(volumeMounts, extraVolumeMounts...)
 
-	return &cloudControllerManagerDeployment{
+	return &CloudControllerManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -174,6 +174,6 @@ func NewDefaultCloudControllerManagerDeployment(cloudProvider, image, path, podC
 	}
 }
 
-func (c *cloudControllerManagerDeployment) Marshal() ([]byte, error) {
+func (c *CloudControllerManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -23,7 +23,7 @@ func TestCloudControllerMarshalUnmarshal(t *testing.T) {
 	data, err := cloudControllerManagerDepl.Marshal()
 	require.NoError(err)
 
-	var recreated cloudControllerManagerDeployment
+	var recreated CloudControllerManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(cloudControllerManagerDepl, &recreated)
 }
@@ -15,7 +15,7 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type cloudNodeManagerDeployment struct {
+type CloudNodeManagerDeployment struct {
 	ServiceAccount     k8s.ServiceAccount
 	ClusterRole        rbac.ClusterRole
 	ClusterRoleBinding rbac.ClusterRoleBinding
@@ -23,13 +23,13 @@ type cloudNodeManagerDeployment struct {
 }
 
 // NewDefaultCloudNodeManagerDeployment creates a new *cloudNodeManagerDeployment, customized for the CSP.
-func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *cloudNodeManagerDeployment {
+func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string) *CloudNodeManagerDeployment {
 	command := []string{
 		path,
 		"--node-name=$(NODE_NAME)",
 	}
 	command = append(command, extraArgs...)
-	return &cloudNodeManagerDeployment{
+	return &CloudNodeManagerDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -182,6 +182,6 @@ func NewDefaultCloudNodeManagerDeployment(image, path string, extraArgs []string
 }
 
 // Marshal marshals the cloud-node-manager deployment as YAML documents.
-func (c *cloudNodeManagerDeployment) Marshal() ([]byte, error) {
+func (c *CloudNodeManagerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestCloudNodeManagerMarshalUnmarshal(t *testing.T) {
 	data, err := cloudNodeManagerDepl.Marshal()
 	require.NoError(err)
 
-	var recreated cloudNodeManagerDeployment
+	var recreated CloudNodeManagerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(cloudNodeManagerDepl, &recreated)
 }
@@ -18,7 +18,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type autoscalerDeployment struct {
+type AutoscalerDeployment struct {
 	PodDisruptionBudget policy.PodDisruptionBudget
 	ServiceAccount      k8s.ServiceAccount
 	ClusterRole         rbac.ClusterRole
@@ -30,8 +30,8 @@ type autoscalerDeployment struct {
 }
 
 // NewDefaultAutoscalerDeployment creates a new *autoscalerDeployment, customized for the CSP.
-func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar, k8sVersion versions.ValidK8sVersion) *autoscalerDeployment {
-	return &autoscalerDeployment{
+func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts []k8s.VolumeMount, env []k8s.EnvVar, k8sVersion versions.ValidK8sVersion) *AutoscalerDeployment {
+	return &AutoscalerDeployment{
 		PodDisruptionBudget: policy.PodDisruptionBudget{
 			TypeMeta: v1.TypeMeta{
 				APIVersion: "policy/v1",
@@ -491,6 +491,6 @@ func NewDefaultAutoscalerDeployment(extraVolumes []k8s.Volume, extraVolumeMounts
 	}
 }
 
-func (a *autoscalerDeployment) Marshal() ([]byte, error) {
+func (a *AutoscalerDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(a)
 }
@@ -25,7 +25,7 @@ func TestAutoscalerDeploymentMarshalUnmarshal(t *testing.T) {
 
 	t.Log(string(data))
 
-	var recreated autoscalerDeployment
+	var recreated AutoscalerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(autoscalerDepl, &recreated)
 }
@@ -41,7 +41,7 @@ func TestAutoscalerDeploymentWithCommandMarshalUnmarshal(t *testing.T) {
 
 	t.Log(string(data))
 
-	var recreated autoscalerDeployment
+	var recreated AutoscalerDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(autoscalerDepl, &recreated)
 }
@@ -14,14 +14,14 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type gcpGuestAgentDaemonset struct {
+type GCPGuestAgentDaemonset struct {
 	DaemonSet apps.DaemonSet
 }
 
 // NewGCPGuestAgentDaemonset creates a new GCP Guest Agent Daemonset.
 // It is used automatically to add loadbalancer IPs to the local routing table of GCP instances.
-func NewGCPGuestAgentDaemonset() *gcpGuestAgentDaemonset {
-	return &gcpGuestAgentDaemonset{
+func NewGCPGuestAgentDaemonset() *GCPGuestAgentDaemonset {
+	return &GCPGuestAgentDaemonset{
 		DaemonSet: apps.DaemonSet{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "apps/v1",
@@ -178,6 +178,6 @@ func NewGCPGuestAgentDaemonset() *gcpGuestAgentDaemonset {
 }
 
 // Marshal marshals the access-manager deployment as YAML documents.
-func (c *gcpGuestAgentDaemonset) Marshal() ([]byte, error) {
+func (c *GCPGuestAgentDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -20,7 +20,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type joinServiceDaemonset struct {
+type JoinServiceDaemonset struct {
 	ClusterRole        rbac.ClusterRole
 	ClusterRoleBinding rbac.ClusterRoleBinding
 	ConfigMap          k8s.ConfigMap
@@ -30,17 +30,17 @@ type joinServiceDaemonset struct {
 }
 
 // NewJoinServiceDaemonset returns a daemonset for the join service.
-func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIdKeyDigest, enforceIdKeyDigest string, measurementSalt []byte) *joinServiceDaemonset {
+func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIDKeyDigest, enforceIDKeyDigest string, measurementSalt []byte) *JoinServiceDaemonset {
 	joinConfigData := map[string]string{
 		constants.MeasurementsFilename: measurementsJSON,
 		constants.EnforcedPCRsFilename: enforcedPCRsJSON,
 	}
 	if cloudprovider.FromString(csp) == cloudprovider.Azure {
-		joinConfigData[constants.EnforceIdKeyDigestFilename] = enforceIdKeyDigest
-		joinConfigData[constants.IdKeyDigestFilename] = initialIdKeyDigest
+		joinConfigData[constants.EnforceIDKeyDigestFilename] = enforceIDKeyDigest
+		joinConfigData[constants.IDKeyDigestFilename] = initialIDKeyDigest
 	}
 
-	return &joinServiceDaemonset{
+	return &JoinServiceDaemonset{
 		ClusterRole: rbac.ClusterRole{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "rbac.authorization.k8s.io/v1",
@@ -272,6 +272,6 @@ func NewJoinServiceDaemonset(csp, measurementsJSON, enforcedPCRsJSON, initialIdK
 }
 
 // Marshal the daemonset using the Kubernetes resource marshaller.
-func (a *joinServiceDaemonset) Marshal() ([]byte, error) {
+func (a *JoinServiceDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(a)
 }
@@ -19,7 +19,7 @@ func TestNewJoinServiceDaemonset(t *testing.T) {
 	deploymentYAML, err := deployment.Marshal()
 	require.NoError(t, err)
 
-	var recreated joinServiceDaemonset
+	var recreated JoinServiceDaemonset
 	require.NoError(t, kubernetes.UnmarshalK8SResources(deploymentYAML, &recreated))
 	assert.Equal(t, deployment, &recreated)
 }
@@ -21,7 +21,7 @@ import (
 
 const kmsNamespace = "kube-system"
 
-type kmsDeployment struct {
+type KMSDeployment struct {
 	ServiceAccount k8s.ServiceAccount
 	Service        k8s.Service
 	ClusterRole    rbac.ClusterRole
@@ -41,8 +41,8 @@ type KMSConfig struct {
 }
 
 // NewKMSDeployment creates a new *kmsDeployment to use as the key management system inside Constellation.
-func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
-	return &kmsDeployment{
+func NewKMSDeployment(csp string, config KMSConfig) *KMSDeployment {
+	return &KMSDeployment{
 		ServiceAccount: k8s.ServiceAccount{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "v1",
@@ -254,6 +254,6 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
 	}
 }
 
-func (c *kmsDeployment) Marshal() ([]byte, error) {
+func (c *KMSDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestKMSMarshalUnmarshal(t *testing.T) {
 	data, err := kmsDepl.Marshal()
 	require.NoError(err)
 
-	var recreated kmsDeployment
+	var recreated KMSDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(kmsDepl, &recreated)
 }
@@ -28,22 +28,22 @@ const (
 	KonnectivityKeyFilename = "/etc/kubernetes/konnectivity.key"
 )
 
-type konnectivityAgents struct {
+type KonnectivityAgents struct {
 	DaemonSet          appsv1.DaemonSet
 	ClusterRoleBinding rbacv1.ClusterRoleBinding
 	ServiceAccount     corev1.ServiceAccount
 }
 
-type konnectivityServerStaticPod struct {
+type KonnectivityServerStaticPod struct {
 	StaticPod corev1.Pod
 }
 
-type egressSelectorConfiguration struct {
+type EgressSelectorConfiguration struct {
 	EgressSelectorConfiguration apiserver.EgressSelectorConfiguration
 }
 
-func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents {
-	return &konnectivityAgents{
+func NewKonnectivityAgents(konnectivityServerAddress string) *KonnectivityAgents {
+	return &KonnectivityAgents{
 		DaemonSet: appsv1.DaemonSet{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "apps/v1",
@@ -213,9 +213,9 @@ func NewKonnectivityAgents(konnectivityServerAddress string) *konnectivityAgents
 	}
 }
 
-func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
+func NewKonnectivityServerStaticPod() *KonnectivityServerStaticPod {
 	udsHostPathType := corev1.HostPathDirectoryOrCreate
-	return &konnectivityServerStaticPod{
+	return &KonnectivityServerStaticPod{
 		StaticPod: corev1.Pod{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "v1",
@@ -333,8 +333,8 @@ func NewKonnectivityServerStaticPod() *konnectivityServerStaticPod {
 	}
 }
 
-func NewEgressSelectorConfiguration() *egressSelectorConfiguration {
-	return &egressSelectorConfiguration{
+func NewEgressSelectorConfiguration() *EgressSelectorConfiguration {
+	return &EgressSelectorConfiguration{
 		EgressSelectorConfiguration: apiserver.EgressSelectorConfiguration{
 			TypeMeta: metav1.TypeMeta{
 				APIVersion: "apiserver.k8s.io/v1beta1",
@@ -357,15 +357,15 @@ func NewEgressSelectorConfiguration() *egressSelectorConfiguration {
 	}
 }
 
-func (v *konnectivityAgents) Marshal() ([]byte, error) {
+func (v *KonnectivityAgents) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
 
-func (v *konnectivityServerStaticPod) Marshal() ([]byte, error) {
+func (v *KonnectivityServerStaticPod) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
 
-func (v *egressSelectorConfiguration) Marshal() ([]byte, error) {
+func (v *EgressSelectorConfiguration) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
@@ -22,7 +22,7 @@ func TestKonnectivityMarshalUnmarshal(t *testing.T) {
 	data, err := kmsDepl.Marshal()
 	require.NoError(err)
 
-	var recreated konnectivityAgents
+	var recreated KonnectivityAgents
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(kmsDepl, &recreated)
 }
@@ -21,7 +21,7 @@ const (
 	nodeMaintenanceOperatorCatalogNamespace = "olm"
 )
 
-type nodeMaintenanceOperatorDeployment struct {
+type NodeMaintenanceOperatorDeployment struct {
 	CatalogSource operatorsv1alpha1.CatalogSource
 	OperatorGroup operatorsv1.OperatorGroup
 	Subscription  operatorsv1alpha1.Subscription
@@ -29,8 +29,8 @@ type nodeMaintenanceOperatorDeployment struct {
 
 // NewNodeMaintenanceOperatorDeployment creates a new node maintenance operator (NMO) deployment.
 // See https://github.com/medik8s/node-maintenance-operator for more information.
-func NewNodeMaintenanceOperatorDeployment() *nodeMaintenanceOperatorDeployment {
-	return &nodeMaintenanceOperatorDeployment{
+func NewNodeMaintenanceOperatorDeployment() *NodeMaintenanceOperatorDeployment {
+	return &NodeMaintenanceOperatorDeployment{
 		CatalogSource: operatorsv1alpha1.CatalogSource{
 			TypeMeta:   metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
 			ObjectMeta: metav1.ObjectMeta{
@@ -80,6 +80,6 @@ func NewNodeMaintenanceOperatorDeployment() *nodeMaintenanceOperatorDeployment {
 	}
 }
 
-func (c *nodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
+func (c *NodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
@@ -22,7 +22,7 @@ func TestNodeMaintenanceOperatorMarshalUnmarshal(t *testing.T) {
 	data, err := nmoDepl.Marshal()
 	require.NoError(err)
 
-	var recreated nodeMaintenanceOperatorDeployment
+	var recreated NodeMaintenanceOperatorDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(nmoDepl, &recreated)
 }
@@ -7,7 +7,6 @@ SPDX-License-Identifier: AGPL-3.0-only
 package resources
 
 import (
-	_ "embed"
 	"time"
 
 	"github.com/edgelesssys/constellation/v2/internal/kubernetes"
@@ -31,7 +30,7 @@ var NodeOperatorCRDNames = []string{
 	"scalinggroups.update.edgeless.systems",
 }
 
-type nodeOperatorDeployment struct {
+type NodeOperatorDeployment struct {
 	CatalogSource operatorsv1alpha1.CatalogSource
 	OperatorGroup operatorsv1.OperatorGroup
 	Subscription  operatorsv1alpha1.Subscription
@@ -39,8 +38,8 @@ type nodeOperatorDeployment struct {
 
 // NewNodeOperatorDeployment creates a new constellation node operator deployment.
 // See /operators/constellation-node-operator for more information.
-func NewNodeOperatorDeployment(cloudProvider string, uid string) *nodeOperatorDeployment {
-	return &nodeOperatorDeployment{
+func NewNodeOperatorDeployment(cloudProvider string, uid string) *NodeOperatorDeployment {
+	return &NodeOperatorDeployment{
 		CatalogSource: operatorsv1alpha1.CatalogSource{
 			TypeMeta:   metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
 			ObjectMeta: metav1.ObjectMeta{
@@ -94,6 +93,6 @@ func NewNodeOperatorDeployment(cloudProvider string, uid string) *nodeOperatorDe
 	}
 }
 
-func (c *nodeOperatorDeployment) Marshal() ([]byte, error) {
+func (c *NodeOperatorDeployment) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(c)
 }
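The dropped `_ "embed"` line above is what revive's blank-imports rule targets: a blank import must carry an explanatory comment, and one that is unused should simply be deleted. A hedged sketch of the rule (illustrative file, not from the repo):

    package resources

    import (
        // Allowed: the blank import is justified by a comment.
        _ "embed" // enables //go:embed directives in this package

        "time"
    )

    // A bare `_ "embed"` with no comment would be flagged by revive.
    var _ = time.Now // illustrative use of the time import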
@@ -22,7 +22,7 @@ func TestNodeOperatorMarshalUnmarshal(t *testing.T) {
 	data, err := nmoDepl.Marshal()
 	require.NoError(err)
 
-	var recreated nodeOperatorDeployment
+	var recreated NodeOperatorDeployment
 	require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
 	assert.Equal(nmoDepl, &recreated)
 }
@@ -21,13 +21,13 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
-type verificationDaemonset struct {
+type VerificationDaemonset struct {
 	DaemonSet    apps.DaemonSet
 	Service      k8s.Service
 	LoadBalancer k8s.Service
 }
 
-func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset {
+func NewVerificationDaemonSet(csp, loadBalancerIP string) *VerificationDaemonset {
 	var err error
 	if strings.Contains(loadBalancerIP, ":") {
 		loadBalancerIP, _, err = net.SplitHostPort(loadBalancerIP)
@@ -35,7 +35,7 @@ func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset
 			panic(err)
 		}
 	}
-	return &verificationDaemonset{
+	return &VerificationDaemonset{
 		DaemonSet: apps.DaemonSet{
 			TypeMeta: meta.TypeMeta{
 				APIVersion: "apps/v1",
@@ -188,6 +188,6 @@ func NewVerificationDaemonSet(csp, loadBalancerIP string) *verificationDaemonset
 	}
 }
 
-func (v *verificationDaemonset) Marshal() ([]byte, error) {
+func (v *VerificationDaemonset) Marshal() ([]byte, error) {
 	return kubernetes.MarshalK8SResources(v)
 }
@@ -19,7 +19,7 @@ func TestNewVerificationDaemonset(t *testing.T) {
 	deploymentYAML, err := deployment.Marshal()
 	require.NoError(t, err)
 
-	var recreated verificationDaemonset
+	var recreated VerificationDaemonset
 	require.NoError(t, kubernetes.UnmarshalK8SResources(deploymentYAML, &recreated))
 	assert.Equal(t, deployment, &recreated)
 }
@@ -77,7 +77,7 @@ func New(cloudProvider string, clusterUtil clusterUtil, configProvider configura
 // InitCluster initializes a new Kubernetes cluster and applies pod network provider.
 func (k *KubeWrapper) InitCluster(
 	ctx context.Context, cloudServiceAccountURI, versionString string, measurementSalt []byte, enforcedPCRs []uint32,
-	enforceIdKeyDigest bool, idKeyDigest []byte, azureCVM bool, kmsConfig resources.KMSConfig, sshUsers map[string]string,
+	enforceIDKeyDigest bool, idKeyDigest []byte, azureCVM bool, kmsConfig resources.KMSConfig, sshUsers map[string]string,
 	helmDeployments []byte, conformanceMode bool, log *logger.Logger,
 ) ([]byte, error) {
 	k8sVersion, err := versions.NewValidK8sVersion(versionString)
@@ -200,7 +200,7 @@ func (k *KubeWrapper) InitCluster(
 		return nil, fmt.Errorf("failed to setup internal ConfigMap: %w", err)
 	}
 
-	if err := k.setupJoinService(k.cloudProvider, k.initialMeasurementsJSON, measurementSalt, enforcedPCRs, idKeyDigest, enforceIdKeyDigest); err != nil {
+	if err := k.setupJoinService(k.cloudProvider, k.initialMeasurementsJSON, measurementSalt, enforcedPCRs, idKeyDigest, enforceIDKeyDigest); err != nil {
 		return nil, fmt.Errorf("setting up join service failed: %w", err)
 	}
 
@@ -321,7 +321,7 @@ func (k *KubeWrapper) GetKubeconfig() ([]byte, error) {
 }
 
 func (k *KubeWrapper) setupJoinService(
-	csp string, measurementsJSON, measurementSalt []byte, enforcedPCRs []uint32, initialIdKeyDigest []byte, enforceIdKeyDigest bool,
+	csp string, measurementsJSON, measurementSalt []byte, enforcedPCRs []uint32, initialIDKeyDigest []byte, enforceIDKeyDigest bool,
 ) error {
 	enforcedPCRsJSON, err := json.Marshal(enforcedPCRs)
 	if err != nil {
@@ -329,7 +329,7 @@ func (k *KubeWrapper) setupJoinService(
 	}
 
 	joinConfiguration := resources.NewJoinServiceDaemonset(
-		csp, string(measurementsJSON), string(enforcedPCRsJSON), hex.EncodeToString(initialIdKeyDigest), strconv.FormatBool(enforceIdKeyDigest), measurementSalt,
+		csp, string(measurementsJSON), string(enforcedPCRsJSON), hex.EncodeToString(initialIDKeyDigest), strconv.FormatBool(enforceIDKeyDigest), measurementSalt,
 	)
 
 	return k.clusterUtil.SetupJoinService(k.client, joinConfiguration)
@@ -29,7 +29,7 @@ type Validator struct {
 	pcrs               map[uint32][]byte
 	enforcedPCRs       []uint32
 	idkeydigest        []byte
-	enforceIdKeyDigest bool
+	enforceIDKeyDigest bool
 	azureCVM           bool
 	validator          atls.Validator
 }
@@ -47,11 +47,11 @@ func NewValidator(provider cloudprovider.Provider, config *config.Config) (*Vali
 	if v.provider == cloudprovider.Azure {
 		v.azureCVM = *config.Provider.Azure.ConfidentialVM
 		if v.azureCVM {
-			idkeydigest, err := hex.DecodeString(config.Provider.Azure.IdKeyDigest)
+			idkeydigest, err := hex.DecodeString(config.Provider.Azure.IDKeyDigest)
 			if err != nil {
 				return nil, fmt.Errorf("bad config: decoding idkeydigest from config: %w", err)
 			}
-			v.enforceIdKeyDigest = *config.Provider.Azure.EnforceIdKeyDigest
+			v.enforceIDKeyDigest = *config.Provider.Azure.EnforceIDKeyDigest
 			v.idkeydigest = idkeydigest
 		}
 	}
@@ -146,7 +146,7 @@ func (v *Validator) updateValidator(cmd *cobra.Command) {
 		v.validator = gcp.NewValidator(v.pcrs, v.enforcedPCRs, log)
 	case cloudprovider.Azure:
 		if v.azureCVM {
-			v.validator = snp.NewValidator(v.pcrs, v.enforcedPCRs, v.idkeydigest, v.enforceIdKeyDigest, log)
+			v.validator = snp.NewValidator(v.pcrs, v.enforcedPCRs, v.idkeydigest, v.enforceIDKeyDigest, log)
 		} else {
 			v.validator = trustedlaunch.NewValidator(v.pcrs, v.enforcedPCRs, log)
 		}
@@ -39,8 +39,8 @@ func TestNewValidator(t *testing.T) {
 		provider           cloudprovider.Provider
 		config             *config.Config
 		pcrs               map[uint32][]byte
-		enforceIdKeyDigest bool
-		idkeydigest        string
+		enforceIDKeyDigest bool
+		idKeyDigest        string
 		azureCVM           bool
 		wantErr            bool
 	}{
@@ -80,14 +80,14 @@ func TestNewValidator(t *testing.T) {
 		"set idkeydigest": {
 			provider:           cloudprovider.Azure,
 			pcrs:               testPCRs,
-			idkeydigest:        "414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141",
-			enforceIdKeyDigest: true,
+			idKeyDigest:        "414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141",
+			enforceIDKeyDigest: true,
 		},
 		"invalid idkeydigest": {
 			provider:           cloudprovider.Azure,
 			pcrs:               testPCRs,
-			idkeydigest:        "41414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414",
-			enforceIdKeyDigest: true,
+			idKeyDigest:        "41414141414141414141414141414141414141414141414141414141414141414141414141414141414141414141414",
+			enforceIDKeyDigest: true,
 			azureCVM:           true,
 			wantErr:            true,
 		},
@@ -104,7 +104,7 @@ func TestNewValidator(t *testing.T) {
 			}
 			if tc.provider == cloudprovider.Azure {
 				measurements := config.Measurements(tc.pcrs)
-				conf.Provider.Azure = &config.AzureConfig{Measurements: measurements, EnforceIdKeyDigest: &tc.enforceIdKeyDigest, IdKeyDigest: tc.idkeydigest, ConfidentialVM: &tc.azureCVM}
+				conf.Provider.Azure = &config.AzureConfig{Measurements: measurements, EnforceIDKeyDigest: &tc.enforceIDKeyDigest, IDKeyDigest: tc.idKeyDigest, ConfidentialVM: &tc.azureCVM}
 			}
 			if tc.provider == cloudprovider.QEMU {
 				measurements := config.Measurements(tc.pcrs)
@@ -77,7 +77,7 @@ func create(cmd *cobra.Command, creator cloudCreator, fileHandler file.Handler)
 	if config.IsAzureNonCVM() {
 		cmd.Println("Disabling Confidential VMs is insecure. Use only for evaluation purposes.")
 		printedAWarning = true
-		if config.EnforcesIdKeyDigest() {
+		if config.EnforcesIDKeyDigest() {
 			cmd.Println("Your config asks for enforcing the idkeydigest. This is only available on Confidential VMs. It will not be enforced.")
 		}
 	}
@@ -138,7 +138,7 @@ func initialize(cmd *cobra.Command, newDialer func(validator *cloudcmd.Validator
 		SshUserKeys:        ssh.ToProtoSlice(sshUsers),
 		HelmDeployments:    helmDeployments,
 		EnforcedPcrs:       getEnforcedMeasurements(provider, config),
-		EnforceIdkeydigest: getEnforceIdKeyDigest(provider, config),
+		EnforceIdkeydigest: getEnforceIDKeyDigest(provider, config),
 		ConformanceMode:    flags.conformance,
 	}
 	resp, err := initCall(cmd.Context(), newDialer(validator), flags.endpoint, req)
@@ -237,10 +237,10 @@ func getEnforcedMeasurements(provider cloudprovider.Provider, config *config.Con
 	}
 }
 
-func getEnforceIdKeyDigest(provider cloudprovider.Provider, config *config.Config) bool {
+func getEnforceIDKeyDigest(provider cloudprovider.Provider, config *config.Config) bool {
 	switch provider {
 	case cloudprovider.Azure:
-		return *config.Provider.Azure.EnforceIdKeyDigest
+		return *config.Provider.Azure.EnforceIDKeyDigest
 	default:
 		return false
 	}
@@ -76,7 +76,7 @@ func main() {
 	}
 	sched := metadata.NewScheduler(log.Named("scheduler"), fetcher, ssh, download)
 	serv := server.New(log.Named("server"), ssh, serviceManager, streamer)
-	if err := deploy.DeployDefaultServiceUnit(ctx, serviceManager); err != nil {
+	if err := deploy.DefaultServiceUnit(ctx, serviceManager); err != nil {
 		log.Fatalf("%s", err)
 	}
 
@@ -179,13 +179,13 @@ type fakeDownloadServer struct {
 	pb.UnimplementedDebugdServer
 }
 
-func (f *fakeDownloadServer) DownloadBootstrapper(request *pb.DownloadBootstrapperRequest, stream pb.Debugd_DownloadBootstrapperServer) error {
-	for _, chunk := range f.chunks {
+func (s *fakeDownloadServer) DownloadBootstrapper(request *pb.DownloadBootstrapperRequest, stream pb.Debugd_DownloadBootstrapperServer) error {
+	for _, chunk := range s.chunks {
 		if err := stream.Send(&pb.Chunk{Content: chunk}); err != nil {
 			return fmt.Errorf("sending chunk: %w", err)
 		}
 	}
-	return f.downladErr
+	return s.downladErr
 }
 
 func (s *fakeDownloadServer) DownloadAuthorizedKeys(context.Context, *pb.DownloadAuthorizedKeysRequest) (*pb.DownloadAuthorizedKeysResponse, error) {
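This hunk fixes revive's receiver-naming rule: the same type used receiver name f in one method and s in another, and the rule requires a consistent receiver name per type. A small sketch of the pattern (names are illustrative):

    package example

    type server struct{ name string }

    // Inconsistent receivers trip receiver-naming:
    //   func (f *server) A() {}
    //   func (s *server) B() {}

    // Consistent receivers pass:
    func (s *server) A() string { return s.name }
    func (s *server) B() string { return s.name }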
@@ -146,8 +146,8 @@ func (s *ServiceManager) WriteSystemdUnitFile(ctx context.Context, unit SystemdU
 	return nil
 }
 
-// DeployDefaultServiceUnit will write the default "bootstrapper.service" unit file.
-func DeployDefaultServiceUnit(ctx context.Context, serviceManager *ServiceManager) error {
+// DefaultServiceUnit will write the default "bootstrapper.service" unit file.
+func DefaultServiceUnit(ctx context.Context, serviceManager *ServiceManager) error {
 	if err := serviceManager.WriteSystemdUnitFile(ctx, SystemdUnit{
 		Name:     debugd.BootstrapperSystemdUnitName,
 		Contents: debugd.BootstrapperSystemdUnitContents,
@@ -130,19 +130,19 @@ func (s *RecoveryServer) Recover(stream recoverproto.API_RecoverServer) error {
 	return nil
 }
 
-// stubServer implements the RecoveryServer interface but does not actually start a server.
-type stubServer struct {
+// StubServer implements the RecoveryServer interface but does not actually start a server.
+type StubServer struct {
 	log *logger.Logger
 }
 
 // NewStub returns a new stubbed RecoveryServer.
 // We use this to avoid having to start a server for worker nodes, since they don't require manual recovery.
-func NewStub(log *logger.Logger) *stubServer {
-	return &stubServer{log: log}
+func NewStub(log *logger.Logger) *StubServer {
+	return &StubServer{log: log}
 }
 
 // Serve waits until the context is canceled and returns nil.
-func (s *stubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) {
+func (s *StubServer) Serve(ctx context.Context, _ net.Listener, _ string) ([]byte, []byte, error) {
 	s.log.Infof("Running as worker node, skipping recovery server")
 	<-ctx.Done()
 	return nil, nil, ctx.Err()
@@ -38,8 +38,8 @@ const (
 	stateInfoPath = stateDiskMountPath + "/constellation/node_state.json"
 )
 
-// SetupManager handles formatting, mapping, mounting and unmounting of state disks.
-type SetupManager struct {
+// Manager handles formatting, mapping, mounting and unmounting of state disks.
+type Manager struct {
 	log      *logger.Logger
 	csp      string
 	diskPath string
@@ -53,8 +53,8 @@ type SetupManager struct {
 // New initializes a SetupManager with the given parameters.
 func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero,
 	mapper DeviceMapper, mounter Mounter, openTPM vtpm.TPMOpenFunc,
-) *SetupManager {
-	return &SetupManager{
+) *Manager {
+	return &Manager{
 		log:      log,
 		csp:      csp,
 		diskPath: diskPath,
@@ -68,7 +68,7 @@ func New(log *logger.Logger, csp string, diskPath string, fs afero.Afero,
 
 // PrepareExistingDisk requests and waits for a decryption key to remap the encrypted state disk.
 // Once the disk is mapped, the function taints the node as initialized by updating it's PCRs.
-func (s *SetupManager) PrepareExistingDisk(recover RecoveryDoer) error {
+func (s *Manager) PrepareExistingDisk(recover RecoveryDoer) error {
 	s.log.Infof("Preparing existing state disk")
 	uuid := s.mapper.DiskUUID()
 
@@ -113,7 +113,7 @@ func (s *SetupManager) PrepareExistingDisk(recover RecoveryDoer) error {
 }
 
 // PrepareNewDisk prepares an instances state disk by formatting the disk as a LUKS device using a random passphrase.
-func (s *SetupManager) PrepareNewDisk() error {
+func (s *Manager) PrepareNewDisk() error {
 	s.log.Infof("Preparing new state disk")
 
 	// generate and save temporary passphrase
@@ -132,7 +132,7 @@ func (s *SetupManager) PrepareNewDisk() error {
 	return s.mapper.MapDisk(stateDiskMappedName, string(passphrase))
 }
 
-func (s *SetupManager) readMeasurementSalt(path string) ([]byte, error) {
+func (s *Manager) readMeasurementSalt(path string) ([]byte, error) {
 	handler := file.NewHandler(s.fs)
 	var state nodestate.NodeState
 	if err := handler.ReadJSON(path, &state); err != nil {
@@ -147,7 +147,7 @@ func (s *SetupManager) readMeasurementSalt(path string) ([]byte, error) {
 }
 
 // saveConfiguration saves the given passphrase and cryptsetup mapping configuration to disk.
-func (s *SetupManager) saveConfiguration(passphrase []byte) error {
+func (s *Manager) saveConfiguration(passphrase []byte) error {
 	// passphrase
 	if err := s.fs.MkdirAll(keyPath, os.ModePerm); err != nil {
 		return err
@@ -168,14 +168,14 @@ type RejoinClient interface {
 	Start(context.Context, string) (key, secret []byte)
 }
 
-type nodeRecoverer struct {
+type NodeRecoverer struct {
 	recoveryServer RecoveryServer
 	rejoinClient   RejoinClient
 }
 
 // NewNodeRecoverer initializes a new nodeRecoverer.
-func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient) *nodeRecoverer {
-	return &nodeRecoverer{
+func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient) *NodeRecoverer {
+	return &NodeRecoverer{
 		recoveryServer: recoveryServer,
 		rejoinClient:   rejoinClient,
 	}
@@ -184,7 +184,7 @@ func NewNodeRecoverer(recoveryServer RecoveryServer, rejoinClient RejoinClient)
 // Do performs a recovery procedure on the given state disk.
 // The method starts a gRPC server to allow manual recovery by a user.
 // At the same time it tries to request a decryption key from all available Constellation control-plane nodes.
-func (r *nodeRecoverer) Do(uuid, endpoint string) (passphrase, measurementSecret []byte, err error) {
+func (r *NodeRecoverer) Do(uuid, endpoint string) (passphrase, measurementSecret []byte, err error) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 	lis, err := net.Listen("tcp", endpoint)
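Several renames in this file (SetupManager to Manager, and later ClusterKMS and SetUpKMS to KMS) follow the Go naming guidance behind revive's exported rule: exported names are read together with their package name, so setup.SetupManager stutters while setup.Manager does not. A minimal sketch of the convention (the package body is illustrative):

    // Package setup is read qualified by its callers, so exported names
    // should not repeat the package name.
    package setup

    // Stutters at the call site:  setup.SetupManager, setup.SetUpKMS
    // Reads cleanly instead:      setup.Manager, setup.KMS

    // Manager is a placeholder mirroring the rename in this commit.
    type Manager struct{}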
@@ -135,7 +135,7 @@ func TestPrepareExistingDisk(t *testing.T) {
 				require.NoError(t, handler.WriteJSON(stateInfoPath, nodestate.NodeState{MeasurementSalt: salt}, file.OptMkdirAll))
 			}
 
-			setupManager := &SetupManager{
+			setupManager := &Manager{
 				log:      logger.NewTest(t),
 				csp:      "test",
 				diskPath: "disk-path",
@@ -213,7 +213,7 @@ func TestPrepareNewDisk(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			assert := assert.New(t)
 
-			setupManager := &SetupManager{
+			setupManager := &Manager{
 				log:      logger.NewTest(t),
 				csp:      "test",
 				diskPath: "disk-path",
@@ -16,9 +16,8 @@ import (
 // NewIssuer returns an SNP issuer if it can successfully read the idkeydigest from the TPM.
 // Otherwise returns a Trusted Launch issuer.
 func NewIssuer() atls.Issuer {
-	if _, err := snp.GetIdKeyDigest(vtpm.OpenVTPM); err == nil {
+	if _, err := snp.GetIDKeyDigest(vtpm.OpenVTPM); err == nil {
 		return snp.NewIssuer()
-	} else {
-		return trustedlaunch.NewIssuer()
 	}
+	return trustedlaunch.NewIssuer()
 }
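This is revive's indent-error-flow rule in action: when the if branch ends in a return, the else block is redundant and its body can be outdented to an early return. A small before/after sketch (helper names are illustrative):

    // Flagged by indent-error-flow: the else is unnecessary.
    func pickBefore(ok bool) string {
        if ok {
            return "snp"
        } else {
            return "trusted-launch"
        }
    }

    // Preferred: early return, no else indentation.
    func pickAfter(ok bool) string {
        if ok {
            return "snp"
        }
        return "trusted-launch"
    }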
@@ -28,8 +28,8 @@ const (
 	tpmAkIdx = 0x81000003
 )
 
-// GetIdKeyDigest reads the idkeydigest from the snp report saved in the TPM's non-volatile memory.
-func GetIdKeyDigest(open vtpm.TPMOpenFunc) ([]byte, error) {
+// GetIDKeyDigest reads the idkeydigest from the snp report saved in the TPM's non-volatile memory.
+func GetIDKeyDigest(open vtpm.TPMOpenFunc) ([]byte, error) {
 	tpm, err := open()
 	if err != nil {
 		return nil, err
@@ -75,7 +75,7 @@ func NewIssuer() *Issuer {
 // The attestation report is loaded from the TPM, the certificate chain is queried
 // from the cloud metadata API.
 // [1] https://github.com/AMDESE/sev-guest/blob/main/include/attestation.h
-func getInstanceInfo(reportGetter tpmReportGetter, imdsAPI imdsApi) func(tpm io.ReadWriteCloser) ([]byte, error) {
+func getInstanceInfo(reportGetter tpmReportGetter, imdsapi imdsAPI) func(tpm io.ReadWriteCloser) ([]byte, error) {
 	return func(tpm io.ReadWriteCloser) ([]byte, error) {
 		hclReport, err := reportGetter.get(tpm)
 		if err != nil {
@@ -88,7 +88,7 @@ func getInstanceInfo(reportGetter tpmReportGetter, imdsAPI imdsApi) func(tpm io.
 
 		runtimeData, _, _ := bytes.Cut(hclReport[lenSnpReport+lenSnpReportRuntimeDataPadding:], []byte{0})
 
-		vcekResponse, err := imdsAPI.getVcek(context.TODO())
+		vcekResponse, err := imdsapi.getVcek(context.TODO())
 		if err != nil {
 			return nil, fmt.Errorf("getVcekFromIMDS: %w", err)
 		}
@@ -128,6 +128,6 @@ type tpmReportGetter interface {
 	get(tpm io.ReadWriteCloser) ([]byte, error)
 }
 
-type imdsApi interface {
+type imdsAPI interface {
 	getVcek(ctx context.Context) (vcekResponse, error)
 }
@@ -67,7 +67,7 @@ func TestGetSNPAttestation(t *testing.T) {
 				err: nil,
 			}
 
-			attestationJson, err := getInstanceInfo(&snpAttestationReport, imdsClient)(tpm)
+			attestationJSON, err := getInstanceInfo(&snpAttestationReport, imdsClient)(tpm)
 			if tc.wantErr {
 				assert.Error(err)
 				return
@@ -75,7 +75,7 @@ func TestGetSNPAttestation(t *testing.T) {
 			assert.NoError(err)
 
 			var instanceInfo azureInstanceInfo
-			err = json.Unmarshal(attestationJson, &instanceInfo)
+			err = json.Unmarshal(attestationJSON, &instanceInfo)
 			assert.NoError(err)
 
 			if tc.wantErr {
@@ -159,16 +159,16 @@ func validateSNPReport(cert *x509.Certificate, expectedIDKeyDigest []byte, enfor
 		return fmt.Errorf("mismatching vcek extensions: %w", err)
 	}
 
-	sig_r := report.Signature.R[:]
-	sig_s := report.Signature.S[:]
+	sigR := report.Signature.R[:]
+	sigS := report.Signature.S[:]
 
 	// Table 107 in https://www.amd.com/system/files/TechDocs/56860.pdf mentions little endian signature components.
 	// They come out of the certificate as big endian.
-	reverseEndian(sig_r)
-	reverseEndian(sig_s)
+	reverseEndian(sigR)
+	reverseEndian(sigS)
 
-	rParam := new(big.Int).SetBytes(sig_r)
-	sParam := new(big.Int).SetBytes(sig_s)
+	rParam := new(big.Int).SetBytes(sigR)
+	sParam := new(big.Int).SetBytes(sigS)
 	sequence := ecdsaSig{rParam, sParam}
 	sigEncoded, err := asn1.Marshal(sequence)
 	if err != nil {
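Beyond the rename, the surrounding code converts the SNP report's little-endian R and S signature components into the big-endian form big.Int expects, then ASN.1-encodes them for ECDSA verification. A self-contained sketch of that conversion (the types mirror the hunk; the values are illustrative):

    package main

    import (
        "encoding/asn1"
        "fmt"
        "math/big"
    )

    // ecdsaSig mirrors the ASN.1 SEQUENCE used for ECDSA signatures.
    type ecdsaSig struct {
        R, S *big.Int
    }

    // reverseEndian flips a byte slice in place, converting between the
    // report's little-endian layout and big.Int's big-endian input.
    func reverseEndian(b []byte) {
        for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
            b[i], b[j] = b[j], b[i]
        }
    }

    func main() {
        sigR := []byte{0x01, 0x02, 0x03} // little-endian, as in the SNP report
        reverseEndian(sigR)              // now big-endian: 0x030201
        r := new(big.Int).SetBytes(sigR)

        der, err := asn1.Marshal(ecdsaSig{R: r, S: big.NewInt(1)})
        if err != nil {
            panic(err)
        }
        fmt.Printf("r=%v der=%x\n", r, der)
    }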
File diff suppressed because one or more lines are too long
@@ -135,10 +135,10 @@ type AzureConfig struct {
 	EnforcedMeasurements []uint32 `yaml:"enforcedMeasurements"`
 	// description: |
 	//   Expected value for the field 'idkeydigest' in the AMD SEV-SNP attestation report. Only usable with ConfidentialVMs. See 4.6 and 7.3 in: https://www.amd.com/system/files/TechDocs/56860.pdf
-	IdKeyDigest string `yaml:"idKeyDigest" validate:"required_if=EnforceIdKeyDigest true,omitempty,hexadecimal,len=96"`
+	IDKeyDigest string `yaml:"idKeyDigest" validate:"required_if=EnforceIdKeyDigest true,omitempty,hexadecimal,len=96"`
 	// description: |
 	//   Enforce the specified idKeyDigest value during remote attestation.
-	EnforceIdKeyDigest *bool `yaml:"enforceIdKeyDigest" validate:"required"`
+	EnforceIDKeyDigest *bool `yaml:"enforceIdKeyDigest" validate:"required"`
 	// description: |
 	//   Use Confidential VMs. If set to false, Trusted Launch VMs are used instead. See: https://docs.microsoft.com/en-us/azure/confidential-computing/confidential-vm-overview
 	ConfidentialVM *bool `yaml:"confidentialVM" validate:"required"`
@@ -223,8 +223,8 @@ func Default() *Config {
 			StateDiskType:        "Premium_LRS",
 			Measurements:         copyPCRMap(azurePCRs),
 			EnforcedMeasurements: []uint32{4, 8, 9, 11, 12},
-			IdKeyDigest:          "57486a447ec0f1958002a22a06b7673b9fd27d11e1c6527498056054c5fa92d23c50f9de44072760fe2b6fb89740b696",
-			EnforceIdKeyDigest:   func() *bool { b := true; return &b }(),
+			IDKeyDigest:          "57486a447ec0f1958002a22a06b7673b9fd27d11e1c6527498056054c5fa92d23c50f9de44072760fe2b6fb89740b696",
+			EnforceIDKeyDigest:   func() *bool { b := true; return &b }(),
 			ConfidentialVM:       func() *bool { b := true; return &b }(),
 		},
 		GCP: &GCPConfig{
@@ -509,8 +509,8 @@ func (c *Config) IsAzureNonCVM() bool {
 	return c.Provider.Azure != nil && c.Provider.Azure.ConfidentialVM != nil && !*c.Provider.Azure.ConfidentialVM
 }
 
-func (c *Config) EnforcesIdKeyDigest() bool {
-	return c.Provider.Azure != nil && c.Provider.Azure.EnforceIdKeyDigest != nil && *c.Provider.Azure.EnforceIdKeyDigest
+func (c *Config) EnforcesIDKeyDigest() bool {
+	return c.Provider.Azure != nil && c.Provider.Azure.EnforceIDKeyDigest != nil && *c.Provider.Azure.EnforceIDKeyDigest
 }
 
 // FromFile returns config file with `name` read from `fileHandler` by parsing
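Note what stays constant in the config hunk: only the Go field names change, while the yaml tags (idKeyDigest, enforceIdKeyDigest) are untouched, so existing user config files keep parsing. A minimal sketch of why struct tags decouple the serialized key from the Go name (assuming the gopkg.in/yaml.v3 package; the struct is illustrative):

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // The Go field was renamed IdKeyDigest -> IDKeyDigest, but the yaml tag
    // pins the on-disk key, so old config files still unmarshal correctly.
    type azureConfig struct {
        IDKeyDigest string `yaml:"idKeyDigest"`
    }

    func main() {
        var c azureConfig
        _ = yaml.Unmarshal([]byte(`idKeyDigest: "5748..."`), &c)
        fmt.Println(c.IDKeyDigest) // 5748...
    }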
@@ -81,10 +81,10 @@ const (
 	MeasurementSaltFilename = "measurementSalt"
 	// MeasurementSecretFilename is the filename of the secret used in creation of the clusterID.
 	MeasurementSecretFilename = "measurementSecret"
-	// IdKeyDigestFilename is the name of the file holding the currently enforced idkeydigest.
-	IdKeyDigestFilename = "idkeydigest"
-	// EnforceIdKeyDigestFilename is the name of the file configuring whether idkeydigest is enforced or not.
-	EnforceIdKeyDigestFilename = "enforceIdKeyDigest"
+	// IDKeyDigestFilename is the name of the file holding the currently enforced idkeydigest.
+	IDKeyDigestFilename = "idkeydigest"
+	// EnforceIDKeyDigestFilename is the name of the file configuring whether idkeydigest is enforced or not.
+	EnforceIDKeyDigestFilename = "enforceIdKeyDigest"
 	// AzureCVM is the name of the file indicating whether the cluster is expected to run on CVMs or not.
 	AzureCVM = "azureCVM"
 	// K8sVersion is the filename of the mapped "k8s-version" configMap file.
@@ -58,15 +58,20 @@ const (
 	NodeMaintenanceOperatorVersion = "v0.13.1-alpha1"
 
 	// currently supported versions.
-	V1_22 ValidK8sVersion = "1.22"
-	V1_23 ValidK8sVersion = "1.23"
-	V1_24 ValidK8sVersion = "1.24"
-	V1_25 ValidK8sVersion = "1.25"
+	//nolint:revive
+	V1_22 ValidK8sVersion = "1.22"
+	//nolint:revive
+	V1_23 ValidK8sVersion = "1.23"
+	//nolint:revive
+	V1_24 ValidK8sVersion = "1.24"
+	//nolint:revive
+	V1_25 ValidK8sVersion = "1.25"
 
 	Default ValidK8sVersion = V1_23
 )
 
 // versionConfigs holds download URLs for all required kubernetes components for every supported version.
-var VersionConfigs map[ValidK8sVersion]KubernetesVersion = map[ValidK8sVersion]KubernetesVersion{
+var VersionConfigs = map[ValidK8sVersion]KubernetesVersion{
 	V1_22: {
 		PatchVersion:  "1.22.12",
 		CNIPluginsURL: "https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz",
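Two related fixes meet in this hunk: the V1_22-style constants violate var-naming (underscores) but are kept for readability, so each gets a //nolint:revive directive scoped to that one declaration; and the VersionConfigs declaration drops the explicit map type that the literal already states, which revive's var-declaration rule flags as redundant. A hedged sketch of targeted suppression (the constant name is a made-up example):

    package versions

    // A //nolint directive on the preceding line silences one rule for one
    // declaration without disabling it repo-wide; naming the linter keeps
    // the intent auditable.
    //nolint:revive
    const V1_99_Example = "1.99" // underscore kept deliberately

    // Type elision: the literal already states the type, so the variable
    // declaration does not need to repeat it.
    var examples = map[string]string{"V1_99": V1_99_Example}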
@@ -108,21 +108,21 @@ func (u *Updatable) Update() error {
 	u.log.Debugf("Enforced PCRs: %v", enforced)
 
 	var idkeydigest []byte
-	var enforceIdKeyDigest bool
+	var enforceIDKeyDigest bool
 	if u.csp == cloudprovider.Azure && u.azureCVM {
 		u.log.Infof("Updating encforceIdKeyDigest value")
-		enforceRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename))
+		enforceRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename))
 		if err != nil {
 			return err
 		}
-		enforceIdKeyDigest, err = strconv.ParseBool(string(enforceRaw))
+		enforceIDKeyDigest, err = strconv.ParseBool(string(enforceRaw))
 		if err != nil {
 			return fmt.Errorf("parsing content of EnforceIdKeyDigestFilename: %s: %w", enforceRaw, err)
 		}
-		u.log.Debugf("New encforceIdKeyDigest value: %v", enforceIdKeyDigest)
+		u.log.Debugf("New encforceIdKeyDigest value: %v", enforceIDKeyDigest)
 
 		u.log.Infof("Updating expected idkeydigest")
-		idkeydigestRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename))
+		idkeydigestRaw, err := u.fileHandler.Read(filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename))
 		if err != nil {
 			return err
 		}
@@ -133,7 +133,7 @@ func (u *Updatable) Update() error {
 		u.log.Debugf("New idkeydigest: %x", idkeydigest)
 	}
 
-	u.Validator = u.newValidator(measurements, enforced, idkeydigest, enforceIdKeyDigest, u.log)
+	u.Validator = u.newValidator(measurements, enforced, idkeydigest, enforceIDKeyDigest, u.log)
 
 	return nil
 }
@@ -84,11 +84,11 @@ func TestNewUpdateableValidator(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -145,11 +145,11 @@ func TestUpdate(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -215,11 +215,11 @@ func TestUpdateConcurrency(t *testing.T) {
 		[]uint32{11},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.IdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.IDKeyDigestFilename),
 		[]byte{},
 	))
 	require.NoError(handler.Write(
-		filepath.Join(constants.ServiceBasePath, constants.EnforceIdKeyDigestFilename),
+		filepath.Join(constants.ServiceBasePath, constants.EnforceIDKeyDigestFilename),
 		[]byte("false"),
 	))
 	require.NoError(handler.Write(
@@ -58,7 +58,7 @@ func main() {
 	// set up Key Management Service
 	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
 	defer cancel()
-	conKMS, err := setup.SetUpKMS(ctx, setup.NoStoreURI, keyURI)
+	conKMS, err := setup.KMS(ctx, setup.NoStoreURI, keyURI)
 	if err != nil {
 		log.With(zap.Error(err)).Fatalf("Failed to setup KMS")
 	}
@@ -13,25 +13,25 @@ import (
 	"github.com/edgelesssys/constellation/v2/internal/crypto"
 )
 
-// ClusterKMS implements the kms.CloudKMS interface for in cluster key management.
-type ClusterKMS struct {
+// KMS implements the kms.CloudKMS interface for in cluster key management.
+type KMS struct {
 	masterKey []byte
 	salt      []byte
 }
 
 // New creates a new ClusterKMS.
-func New(salt []byte) *ClusterKMS {
-	return &ClusterKMS{salt: salt}
+func New(salt []byte) *KMS {
+	return &KMS{salt: salt}
 }
 
 // CreateKEK sets the ClusterKMS masterKey.
-func (c *ClusterKMS) CreateKEK(ctx context.Context, keyID string, kek []byte) error {
+func (c *KMS) CreateKEK(ctx context.Context, keyID string, kek []byte) error {
 	c.masterKey = kek
 	return nil
 }
 
 // GetDEK derives a key from the KMS masterKey.
-func (c *ClusterKMS) GetDEK(ctx context.Context, kekID string, dekID string, dekSize int) ([]byte, error) {
+func (c *KMS) GetDEK(ctx context.Context, kekID string, dekID string, dekSize int) ([]byte, error) {
 	if len(c.masterKey) == 0 {
 		return nil, errors.New("master key not set for Constellation KMS")
 	}
@@ -40,8 +40,8 @@ type KMSInformation struct {
 	KeyEncryptionKeyID string
 }
 
-// SetUpKMS creates a KMS and key store from the given parameters.
-func SetUpKMS(ctx context.Context, storageURI, kmsURI string) (kms.CloudKMS, error) {
+// KMS creates a KMS and key store from the given parameters.
+func KMS(ctx context.Context, storageURI, kmsURI string) (kms.CloudKMS, error) {
 	store, err := getStore(ctx, storageURI)
 	if err != nil {
 		return nil, err
@@ -131,11 +131,11 @@ func TestGetKMS(t *testing.T) {
 func TestSetUpKMS(t *testing.T) {
 	assert := assert.New(t)
 
-	kms, err := SetUpKMS(context.Background(), "storage://unknown", "kms://unknown")
+	kms, err := KMS(context.Background(), "storage://unknown", "kms://unknown")
 	assert.Error(err)
 	assert.Nil(kms)
 
-	kms, err = SetUpKMS(context.Background(), "storage://no-store", "kms://cluster-kms?salt="+base64.URLEncoding.EncodeToString([]byte("salt")))
+	kms, err = KMS(context.Background(), "storage://no-store", "kms://cluster-kms?salt="+base64.URLEncoding.EncodeToString([]byte("salt")))
 	assert.NoError(err)
 	assert.NotNil(kms)
 }
@@ -32,9 +32,8 @@ func (c *Client) GetNodeImage(ctx context.Context, providerID string) (string, e
 	}
 	if resp.Properties.StorageProfile.ImageReference.ID != nil {
 		return *resp.Properties.StorageProfile.ImageReference.ID, nil
-	} else {
-		return *resp.Properties.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 	}
+	return *resp.Properties.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 }
 
 // GetScalingGroupID returns the scaling group ID of the node.
@@ -222,7 +222,7 @@ func TestCreateNode(t *testing.T) {
 				list:     tc.preexistingVMs,
 				fetchErr: tc.fetchErr,
 			}
-			poller := NewStubCapacityPoller(tc.pollErr)
+			poller := newStubCapacityPoller(tc.pollErr)
 			client := Client{
 				virtualMachineScaleSetVMsAPI: &stubvirtualMachineScaleSetVMsAPI{
 					pager: pager,
@@ -357,7 +357,7 @@ type stubCapacityPoller struct {
 	doneC chan struct{}
 }
 
-func NewStubCapacityPoller(pollErr error) *stubCapacityPoller {
+func newStubCapacityPoller(pollErr error) *stubCapacityPoller {
 	return &stubCapacityPoller{
 		pollErr: pollErr,
 		pollC:   make(chan struct{}),
@@ -34,9 +34,8 @@ func (c *Client) GetScalingGroupImage(ctx context.Context, scalingGroupID string
 	}
 	if res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.ID != nil {
 		return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.ID, nil
-	} else {
-		return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 	}
+	return *res.Properties.VirtualMachineProfile.StorageProfile.ImageReference.CommunityGalleryImageID, nil
 }
 
 // SetScalingGroupImage sets the image URI of the scaling group.