AB#2589: Deploy operators via Helm (#575)

* Only deploy operators on GCP/Azure.
* cert-manager is now deployed by default (GCP/Azure).
* Remove OLM.
Otto Bittner 2022-11-21 10:35:40 +01:00 committed by GitHub
parent b8d991f84c
commit bdd9dd922b
146 changed files with 12799 additions and 8706 deletions

View File

@ -27,6 +27,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Changed
<!-- For changes in existing functionality. -->
- Constellation operators are now deployed using Helm.
### Deprecated
<!-- For soon-to-be removed features. -->

View File

@ -47,29 +47,24 @@ func New(log *logger.Logger) (*Client, error) {
return nil, err
}
action := action.NewInstall(actionConfig)
action.Namespace = constants.HelmNamespace
action.Timeout = timeout
return &Client{
action.NewInstall(actionConfig),
action,
}, nil
}
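The per-release assignments in the methods below (h.ReleaseName, h.Wait) suggest that Client embeds the pre-configured install action; a minimal sketch of that assumed layout:
// Sketch only: assumes Client embeds *action.Install from
// helm.sh/helm/v3/pkg/action, as the constructor above suggests.
type Client struct {
	*action.Install
}
With the namespace and timeout fixed once in New, each Install* method only sets the fields that vary per release.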
// InstallConstellationServices installs the constellation-services chart. In the future this chart should bundle all microservices.
func (h *Client) InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error {
h.Namespace = constants.HelmNamespace
h.ReleaseName = release.ReleaseName
h.Wait = release.Wait
h.Timeout = timeout
mergedVals := mergeMaps(release.Values, extraVals)
reader := bytes.NewReader(release.Chart)
chart, err := loader.LoadArchive(reader)
if err != nil {
return fmt.Errorf("helm load archive: %w", err)
}
_, err = h.RunWithContext(ctx, chart, mergedVals)
if err != nil {
return fmt.Errorf("helm install services: %w", err)
if err := h.install(ctx, release.Chart, mergedVals); err != nil {
return err
}
return nil
@ -96,12 +91,36 @@ func mergeMaps(a, b map[string]any) map[string]any {
return out
}
// InstallCilium sets up the cilium pod network.
func (h *Client) InstallCilium(ctx context.Context, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
h.Namespace = constants.HelmNamespace
// InstallCertManager installs the cert-manager chart.
func (h *Client) InstallCertManager(ctx context.Context, release helm.Release) error {
h.ReleaseName = release.ReleaseName
h.Wait = release.Wait
if err := h.install(ctx, release.Chart, release.Values); err != nil {
return err
}
return nil
}
// InstallOperators installs the Constellation Operators.
func (h *Client) InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error {
h.ReleaseName = release.ReleaseName
h.Wait = release.Wait
mergedVals := mergeMaps(release.Values, extraVals)
if err := h.install(ctx, release.Chart, mergedVals); err != nil {
return err
}
return nil
}
// InstallCilium sets up the cilium pod network.
func (h *Client) InstallCilium(ctx context.Context, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
h.ReleaseName = release.ReleaseName
h.Wait = release.Wait
h.Timeout = timeout
switch in.CloudProvider {
case "aws", "azure", "qemu":
@ -121,15 +140,8 @@ func (h *Client) installCiliumGeneric(ctx context.Context, release helm.Release,
release.Values["k8sServiceHost"] = host
release.Values["k8sServicePort"] = strconv.Itoa(constants.KubernetesPort)
reader := bytes.NewReader(release.Chart)
chart, err := loader.LoadArchive(reader)
if err != nil {
return fmt.Errorf("helm load archive: %w", err)
}
_, err = h.RunWithContext(ctx, chart, release.Values)
if err != nil {
return fmt.Errorf("installing cilium: %w", err)
if err := h.install(ctx, release.Chart, release.Values); err != nil {
return err
}
return nil
}
@ -179,16 +191,23 @@ func (h *Client) installCiliumGCP(ctx context.Context, kubectl k8sapi.Client, re
release.Values["k8sServicePort"] = port
}
reader := bytes.NewReader(release.Chart)
if err := h.install(ctx, release.Chart, release.Values); err != nil {
return err
}
return nil
}
func (h *Client) install(ctx context.Context, chartRaw []byte, values map[string]any) error {
reader := bytes.NewReader(chartRaw)
chart, err := loader.LoadArchive(reader)
if err != nil {
return fmt.Errorf("helm load archive: %w", err)
}
_, err = h.RunWithContext(ctx, chart, release.Values)
_, err = h.RunWithContext(ctx, chart, values)
if err != nil {
return fmt.Errorf("helm install cilium: %w", err)
return fmt.Errorf("helm install: %w", err)
}
return nil
}
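loader.LoadArchive expects a gzipped chart tarball, so release.Chart must carry the raw bytes of a packaged chart. A sketch of producing such bytes, e.g. in tests (helper name and path are hypothetical):
// Illustrative helper: package a chart directory into the gzipped tarball
// format that loader.LoadArchive expects.
func packageChart(dir string) ([]byte, error) {
	c, err := loader.LoadDir(dir) // helm.sh/helm/v3/pkg/chart/loader
	if err != nil {
		return nil, err
	}
	tgz, err := chartutil.Save(c, os.TempDir()) // helm.sh/helm/v3/pkg/chartutil
	if err != nil {
		return nil, err
	}
	return os.ReadFile(tgz) // bytes suitable for helm.Release.Chart
}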

View File

@ -42,8 +42,6 @@ import (
const (
// kubeletStartTimeout is the maximum time given to the kubelet service to (re)start.
kubeletStartTimeout = 10 * time.Minute
// crdTimeout is the maximum time given to the CRDs to be created.
crdTimeout = 30 * time.Second
)
// Client provides the functions to talk to the k8s API.
@ -326,19 +324,6 @@ func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationSe
return kubectl.Apply(verificationServiceConfiguration, true)
}
// SetupOperatorLifecycleManager deploys operator lifecycle manager.
func (k *KubernetesUtil) SetupOperatorLifecycleManager(ctx context.Context, kubectl Client, olmCRDs, olmConfiguration kubernetes.Marshaler, crdNames []string) error {
if err := kubectl.Apply(olmCRDs, true); err != nil {
return fmt.Errorf("applying OLM CRDs: %w", err)
}
crdReadyTimeout, cancel := context.WithTimeout(ctx, crdTimeout)
defer cancel()
if err := kubectl.WaitForCRDs(crdReadyTimeout, crdNames); err != nil {
return fmt.Errorf("waiting for OLM CRDs: %w", err)
}
return kubectl.Apply(olmConfiguration, true)
}
// SetupNodeMaintenanceOperator deploys node maintenance operator.
func (k *KubernetesUtil) SetupNodeMaintenanceOperator(kubectl Client, nodeMaintenanceOperatorConfiguration kubernetes.Marshaler) error {
return kubectl.Apply(nodeMaintenanceOperatorConfiguration, true)

View File

@ -1,87 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"time"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/versions"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
nodeMaintenanceOperatorNamespace = "kube-system"
nodeMaintenanceOperatorCatalogNamespace = "olm"
)
// NodeMaintenanceOperatorDeployment groups all deployments for node maintenance operator.
type NodeMaintenanceOperatorDeployment struct {
CatalogSource operatorsv1alpha1.CatalogSource
OperatorGroup operatorsv1.OperatorGroup
Subscription operatorsv1alpha1.Subscription
}
// NewNodeMaintenanceOperatorDeployment creates a new node maintenance operator (NMO) deployment.
// See https://github.com/medik8s/node-maintenance-operator for more information.
func NewNodeMaintenanceOperatorDeployment() *NodeMaintenanceOperatorDeployment {
return &NodeMaintenanceOperatorDeployment{
CatalogSource: operatorsv1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
ObjectMeta: metav1.ObjectMeta{
Name: "node-maintenance-operator-catalog",
Namespace: nodeMaintenanceOperatorCatalogNamespace,
},
Spec: operatorsv1alpha1.CatalogSourceSpec{
SourceType: "grpc",
Image: versions.NodeMaintenanceOperatorCatalogImage,
DisplayName: "Node Maintenance Operator",
Publisher: "Medik8s Team",
UpdateStrategy: &operatorsv1alpha1.UpdateStrategy{
RegistryPoll: &operatorsv1alpha1.RegistryPoll{
RawInterval: "1m0s",
Interval: &metav1.Duration{
Duration: time.Minute,
},
},
},
},
},
OperatorGroup: operatorsv1.OperatorGroup{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1", Kind: "OperatorGroup"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-og",
Namespace: nodeMaintenanceOperatorNamespace,
},
Spec: operatorsv1.OperatorGroupSpec{
UpgradeStrategy: operatorsv1.UpgradeStrategyDefault,
},
},
Subscription: operatorsv1alpha1.Subscription{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "Subscription"},
ObjectMeta: metav1.ObjectMeta{
Name: "node-maintenance-operator-sub",
Namespace: nodeMaintenanceOperatorNamespace,
},
Spec: &operatorsv1alpha1.SubscriptionSpec{
Channel: "stable",
Package: "node-maintenance-operator",
CatalogSource: "node-maintenance-operator-catalog",
CatalogSourceNamespace: "olm",
InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic,
StartingCSV: "node-maintenance-operator." + versions.NodeMaintenanceOperatorVersion,
},
},
}
}
// Marshal to Kubernetes YAML.
func (c *NodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(c)
}

View File

@ -1,28 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"testing"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNodeMaintenanceOperatorMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
nmoDepl := NewNodeMaintenanceOperatorDeployment()
data, err := nmoDepl.Marshal()
require.NoError(err)
var recreated NodeMaintenanceOperatorDeployment
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
assert.Equal(nmoDepl, &recreated)
}

View File

@ -1,92 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"time"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/versions"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
nodeOperatorNamespace = "kube-system"
nodeOperatorCatalogNamespace = "olm"
)
// NodeOperatorDeployment groups all deployments for node operator.
type NodeOperatorDeployment struct {
CatalogSource operatorsv1alpha1.CatalogSource
OperatorGroup operatorsv1.OperatorGroup
Subscription operatorsv1alpha1.Subscription
}
// NewNodeOperatorDeployment creates a new constellation node operator deployment.
// See /operators/constellation-node-operator for more information.
func NewNodeOperatorDeployment(cloudProvider string, uid string) *NodeOperatorDeployment {
return &NodeOperatorDeployment{
CatalogSource: operatorsv1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-node-operator-catalog",
Namespace: nodeOperatorCatalogNamespace,
},
Spec: operatorsv1alpha1.CatalogSourceSpec{
SourceType: "grpc",
Image: versions.NodeOperatorCatalogImage,
DisplayName: "Constellation Node Operator",
Publisher: "Edgeless Systems",
UpdateStrategy: &operatorsv1alpha1.UpdateStrategy{
RegistryPoll: &operatorsv1alpha1.RegistryPoll{
RawInterval: "1m0s",
Interval: &metav1.Duration{Duration: 1 * time.Minute},
},
},
},
},
OperatorGroup: operatorsv1.OperatorGroup{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1", Kind: "OperatorGroup"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-og",
Namespace: nodeOperatorNamespace,
},
Spec: operatorsv1.OperatorGroupSpec{
UpgradeStrategy: operatorsv1.UpgradeStrategyDefault,
},
},
Subscription: operatorsv1alpha1.Subscription{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "Subscription"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-node-operator-sub",
Namespace: nodeOperatorNamespace,
},
Spec: &operatorsv1alpha1.SubscriptionSpec{
Channel: "alpha",
Package: "node-operator",
CatalogSource: "constellation-node-operator-catalog",
CatalogSourceNamespace: "olm",
InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic,
StartingCSV: "node-operator." + versions.NodeOperatorVersion,
Config: &operatorsv1alpha1.SubscriptionConfig{
Env: []corev1.EnvVar{
{Name: "CONSTEL_CSP", Value: cloudProvider},
{Name: "constellation-uid", Value: uid},
},
},
},
},
}
}
// Marshal to Kubernetes YAML.
func (c *NodeOperatorDeployment) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(c)
}

View File

@ -1,28 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"testing"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNodeOperatorMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
nmoDepl := NewNodeOperatorDeployment("csp", "uid")
data, err := nmoDepl.Marshal()
require.NoError(err)
var recreated NodeOperatorDeployment
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
assert.Equal(nmoDepl, &recreated)
}

View File

@ -1,37 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import "github.com/edgelesssys/constellation/v2/internal/crds"
// OLMCRDNames are the names of the custom resource definitions that are used by the olm operator.
var OLMCRDNames = []string{
"catalogsources.operators.coreos.com",
"clusterserviceversions.operators.coreos.com",
"installplans.operators.coreos.com",
"olmconfigs.operators.coreos.com",
"operatorconditions.operators.coreos.com",
"operatorgroups.operators.coreos.com",
"operators.operators.coreos.com",
"subscriptions.operators.coreos.com",
}
// OperatorLifecycleManagerCRDs contains custom resource definitions used by the olm operator.
type OperatorLifecycleManagerCRDs struct{}
// Marshal returns the already marshalled CRDs.
func (m *OperatorLifecycleManagerCRDs) Marshal() ([]byte, error) {
return crds.OLMCRDs, nil
}
// OperatorLifecycleManager is the deployment of the olm operator.
type OperatorLifecycleManager struct{}
// Marshal returns the already marshalled deployment yaml.
func (m *OperatorLifecycleManager) Marshal() ([]byte, error) {
return crds.OLM, nil
}

View File

@ -25,9 +25,6 @@ type clusterUtil interface {
SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error
SetupVerificationService(kubectl k8sapi.Client, verificationServiceConfiguration kubernetes.Marshaler) error
SetupGCPGuestAgent(kubectl k8sapi.Client, gcpGuestAgentConfiguration kubernetes.Marshaler) error
SetupOperatorLifecycleManager(ctx context.Context, kubectl k8sapi.Client, olmCRDs, olmConfiguration kubernetes.Marshaler, crdNames []string) error
SetupNodeMaintenanceOperator(kubectl k8sapi.Client, nodeMaintenanceOperatorConfiguration kubernetes.Marshaler) error
SetupNodeOperator(ctx context.Context, kubectl k8sapi.Client, nodeOperatorConfiguration kubernetes.Marshaler) error
FixCilium(log *logger.Logger)
StartKubelet() error
}
@ -37,5 +34,7 @@ type clusterUtil interface {
// Naming is inspired by Helm.
type helmClient interface {
InstallCilium(context.Context, k8sapi.Client, helm.Release, k8sapi.SetupPodNetworkInput) error
InstallCertManager(ctx context.Context, release helm.Release) error
InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error
InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error
}

View File

@ -211,8 +211,21 @@ func (k *KubeWrapper) InitCluster(
return nil, fmt.Errorf("failed to setup verification service: %w", err)
}
if err := k.setupOperators(ctx); err != nil {
return nil, fmt.Errorf("setting up operators: %w", err)
// cert-manager is necessary for our operator deployments. The operators are
// currently only deployed on GCP & Azure, so cert-manager is also only deployed there.
if k.cloudProvider == "gcp" || k.cloudProvider == "azure" {
if err = k.helmClient.InstallCertManager(ctx, helmReleases.CertManager); err != nil {
return nil, fmt.Errorf("installing cert-manager: %w", err)
}
}
operatorVals, err := k.setupOperatorVals(ctx)
if err != nil {
return nil, fmt.Errorf("setting up operator vals: %w", err)
}
if err = k.helmClient.InstallOperators(ctx, helmReleases.Operators, operatorVals); err != nil {
return nil, fmt.Errorf("installing operators: %w", err)
}
if k.cloudProvider == "gcp" {
@ -346,28 +359,6 @@ func (k *KubeWrapper) setupInternalConfigMap(ctx context.Context, azureCVM strin
return nil
}
// setupOperators deploys the operator lifecycle manager and subscriptions to operators.
func (k *KubeWrapper) setupOperators(ctx context.Context) error {
if err := k.clusterUtil.SetupOperatorLifecycleManager(ctx, k.client, &resources.OperatorLifecycleManagerCRDs{}, &resources.OperatorLifecycleManager{}, resources.OLMCRDNames); err != nil {
return fmt.Errorf("setting up OLM: %w", err)
}
if err := k.clusterUtil.SetupNodeMaintenanceOperator(k.client, resources.NewNodeMaintenanceOperatorDeployment()); err != nil {
return fmt.Errorf("setting up node maintenance operator: %w", err)
}
uid, err := k.providerMetadata.UID(ctx)
if err != nil {
return fmt.Errorf("retrieving constellation UID: %w", err)
}
if err := k.clusterUtil.SetupNodeOperator(ctx, k.client, resources.NewNodeOperatorDeployment(k.cloudProvider, uid)); err != nil {
return fmt.Errorf("setting up constellation node operator: %w", err)
}
return nil
}
// k8sCompliantHostname transforms a hostname to an RFC 1123 compliant, lowercase subdomain as required by Kubernetes node names.
// The following regex is used by k8s for validation: /^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$/ .
// Only a simple heuristic is used for now (to lowercase, replace underscores).
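The transformation itself is outside this hunk; a sketch of the heuristic the comment describes, using the quoted validation regex (the actual implementation may differ):
// Sketch of the described heuristic: lowercase, replace underscores, then
// check against the k8s node-name validation regex quoted above.
import (
	"regexp"
	"strings"
)

var nodeNameRegexp = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)

func k8sCompliantHostname(in string) string {
	hostname := strings.ReplaceAll(strings.ToLower(in), "_", "-")
	if !nodeNameRegexp.MatchString(hostname) {
		// The heuristic does not cover every invalid input; callers would
		// need stronger sanitization here.
	}
	return hostname
}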
@ -497,6 +488,19 @@ func (k *KubeWrapper) setupExtraVals(ctx context.Context, initialMeasurementsJSO
return extraVals, nil
}
func (k *KubeWrapper) setupOperatorVals(ctx context.Context) (map[string]any, error) {
uid, err := k.providerMetadata.UID(ctx)
if err != nil {
return nil, fmt.Errorf("retrieving constellation UID: %w", err)
}
return map[string]any{
"constellation-operator": map[string]any{
"constellationUID": uid,
},
}, nil
}
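For context: under Helm's conventions, values nested under the "constellation-operator" key are passed to the subchart of that name, where constellationUID becomes .Values.constellationUID. Assuming mergeMaps (truncated above) gives its second argument precedence, which matches how extraVals are applied, these runtime values override the release defaults:
// Illustrative merge, assuming the second argument wins on conflicts.
defaults := map[string]any{
	"constellation-operator": map[string]any{"replicas": 1}, // example default
}
runtime := map[string]any{
	"constellation-operator": map[string]any{"constellationUID": "1234567890"},
}
merged := mergeMaps(defaults, runtime)
// merged["constellation-operator"] now holds both replicas and constellationUID.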
type ccmConfigGetter interface {
GetCCMConfig(ctx context.Context, providerID, cloudServiceAccountURI string) ([]byte, error)
}

View File

@ -551,14 +551,24 @@ func (s *stubKubeconfigReader) ReadKubeconfig() ([]byte, error) {
}
type stubHelmClient struct {
ciliumError error
servicesError error
ciliumError error
certManagerError error
operatorsError error
servicesError error
}
func (s *stubHelmClient) InstallCilium(ctx context.Context, kubectl k8sapi.Client, release helm.Release, in k8sapi.SetupPodNetworkInput) error {
return s.ciliumError
}
func (s *stubHelmClient) InstallCertManager(ctx context.Context, release helm.Release) error {
return s.certManagerError
}
func (s *stubHelmClient) InstallOperators(ctx context.Context, release helm.Release, extraVals map[string]any) error {
return s.operatorsError
}
func (s *stubHelmClient) InstallConstellationServices(ctx context.Context, release helm.Release, extraVals map[string]any) error {
return s.servicesError
}

View File

@ -0,0 +1,24 @@
annotations:
artifacthub.io/prerelease: "false"
artifacthub.io/signKey: |
fingerprint: 1020CF3C033D4F35BAE1C19E1226061C665DF13E
url: https://cert-manager.io/public-keys/cert-manager-keyring-2021-09-20-1020CF3C033D4F35BAE1C19E1226061C665DF13E.gpg
apiVersion: v1
appVersion: v1.10.0
description: A Helm chart for cert-manager
home: https://github.com/cert-manager/cert-manager
icon: https://raw.githubusercontent.com/cert-manager/cert-manager/d53c0b9270f8cd90d908460d69502694e1838f5f/logo/logo-small.png
keywords:
- cert-manager
- kube-lego
- letsencrypt
- tls
kubeVersion: '>= 1.20.0-0'
maintainers:
- email: cert-manager-maintainers@googlegroups.com
name: cert-manager-maintainers
url: https://cert-manager.io
name: cert-manager
sources:
- https://github.com/cert-manager/cert-manager
version: v1.10.0

View File

@ -0,0 +1,15 @@
cert-manager {{ .Chart.AppVersion }} has been deployed successfully!
In order to begin issuing certificates, you will need to set up a ClusterIssuer
or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer).
More information on the different types of issuers and how to configure them
can be found in our documentation:
https://cert-manager.io/docs/configuration/
For information on how to configure cert-manager to automatically provision
Certificates for Ingress resources, take a look at the `ingress-shim`
documentation:
https://cert-manager.io/docs/usage/ingress/
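As a concrete example of the resource these notes ask for, a hypothetical self-signed ClusterIssuer built with cert-manager's Go types; an ACME issuer such as 'letsencrypt-staging' would set Spec.ACME instead:
// Hypothetical example using cert-manager's Go API
// (github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1 as
// certmanagerv1, k8s.io/apimachinery/pkg/apis/meta/v1 as metav1).
issuer := certmanagerv1.ClusterIssuer{
	TypeMeta:   metav1.TypeMeta{APIVersion: "cert-manager.io/v1", Kind: "ClusterIssuer"},
	ObjectMeta: metav1.ObjectMeta{Name: "selfsigned"},
	Spec: certmanagerv1.IssuerSpec{
		IssuerConfig: certmanagerv1.IssuerConfig{
			SelfSigned: &certmanagerv1.SelfSignedIssuer{},
		},
	},
}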

View File

@ -0,0 +1,174 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "cert-manager.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "cert-manager.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "cert-manager.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "cert-manager.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Webhook templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'webhook' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "webhook.name" -}}
{{- printf "webhook" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "webhook.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 55 | trimSuffix "-" -}}
{{- printf "%s-webhook" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "webhook.caRef" -}}
{{- template "cert-manager.namespace" }}/{{ template "webhook.fullname" . }}-ca
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "webhook.serviceAccountName" -}}
{{- if .Values.webhook.serviceAccount.create -}}
{{ default (include "webhook.fullname" .) .Values.webhook.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.webhook.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
cainjector templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'cainjector' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "cainjector.name" -}}
{{- printf "cainjector" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "cainjector.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
{{- printf "%s-cainjector" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "cainjector.serviceAccountName" -}}
{{- if .Values.cainjector.serviceAccount.create -}}
{{ default (include "cainjector.fullname" .) .Values.cainjector.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.cainjector.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
startupapicheck templates
*/}}
{{/*
Expand the name of the chart.
Manually fix the 'app' and 'name' labels to 'startupapicheck' to maintain
compatibility with the v0.9 deployment selector.
*/}}
{{- define "startupapicheck.name" -}}
{{- printf "startupapicheck" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "startupapicheck.fullname" -}}
{{- $trimmedName := printf "%s" (include "cert-manager.fullname" .) | trunc 52 | trimSuffix "-" -}}
{{- printf "%s-startupapicheck" $trimmedName | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "startupapicheck.serviceAccountName" -}}
{{- if .Values.startupapicheck.serviceAccount.create -}}
{{ default (include "startupapicheck.fullname" .) .Values.startupapicheck.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.startupapicheck.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "chartName" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Labels that should be added on each resource
*/}}
{{- define "labels" -}}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- if eq (default "helm" .Values.creator) "helm" }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
helm.sh/chart: {{ include "chartName" . }}
{{- end -}}
{{- if .Values.global.commonLabels}}
{{ toYaml .Values.global.commonLabels }}
{{- end }}
{{- end -}}
{{/*
Namespace for all resources to be installed into
If not defined in values file then the helm release namespace is used
By default this is not set so the helm release namespace will be used
This gets around a problem within helm discussed here
https://github.com/helm/helm/issues/5358
*/}}
{{- define "cert-manager.namespace" -}}
{{ .Values.namespace | default .Release.Namespace }}
{{- end -}}

View File

@ -0,0 +1,109 @@
{{- if .Values.cainjector.enabled }}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "cainjector.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
{{- with .Values.cainjector.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.cainjector.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- with .Values.cainjector.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 8 }}
{{- with .Values.cainjector.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "cainjector.serviceAccountName" . }}
{{- if hasKey .Values.cainjector "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.cainjector.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.cainjector.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-cainjector
{{- with .Values.cainjector.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.cainjector.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- with .Values.global.leaderElection }}
- --leader-election-namespace={{ .namespace }}
{{- if .leaseDuration }}
- --leader-election-lease-duration={{ .leaseDuration }}
{{- end }}
{{- if .renewDeadline }}
- --leader-election-renew-deadline={{ .renewDeadline }}
{{- end }}
{{- if .retryPeriod }}
- --leader-election-retry-period={{ .retryPeriod }}
{{- end }}
{{- end }}
{{- with .Values.cainjector.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.cainjector.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.cainjector.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.cainjector.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.cainjector.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "cainjector.fullname" . }}-psp
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "cainjector.fullname" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,22 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cainjector.fullname" . }}-psp
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cainjector.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,51 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
allowedCapabilities: [] # default set of capabilities are implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}
{{- end }}

View File

@ -0,0 +1,103 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.global.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "create", "update", "patch"]
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiregistration.k8s.io"]
resources: ["apiservices"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list", "watch", "update"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cainjector.fullname" . }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cainjector.fullname" . }}
subjects:
- name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
# leader election rules
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "cainjector.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
rules:
# Used for leader election by the controller
# cert-manager-cainjector-leader-election is used by the CertificateBased injector controller
# see cmd/cainjector/start.go#L113
# cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller
# see cmd/cainjector/start.go#L137
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-cainjector-leader-election", "cert-manager-cainjector-leader-election-core"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# grant cert-manager permission to manage the leaderelection configmap in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "cainjector.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "cainjector.fullname" . }}:leaderelection
subjects:
- kind: ServiceAccount
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.cainjector.enabled }}
{{- if .Values.cainjector.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.cainjector.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "cainjector.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.cainjector.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cainjector.name" . }}
app.kubernetes.io/name: {{ include "cainjector.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cainjector"
{{- include "labels" . | nindent 4 }}
{{- with .Values.cainjector.serviceAccount.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}

File diff suppressed because it is too large

View File

@ -0,0 +1,168 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "cert-manager.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ template "cert-manager.name" . }}
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- with .Values.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ template "cert-manager.name" . }}
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if and .Values.prometheus.enabled (not .Values.prometheus.servicemonitor.enabled) }}
{{- if not .Values.podAnnotations }}
annotations:
{{- end }}
prometheus.io/path: "/metrics"
prometheus.io/scrape: 'true'
prometheus.io/port: '9402'
{{- end }}
spec:
serviceAccountName: {{ template "cert-manager.serviceAccountName" . }}
{{- if hasKey .Values "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-controller
{{- with .Values.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- if .Values.clusterResourceNamespace }}
- --cluster-resource-namespace={{ .Values.clusterResourceNamespace }}
{{- else }}
- --cluster-resource-namespace=$(POD_NAMESPACE)
{{- end }}
{{- with .Values.global.leaderElection }}
- --leader-election-namespace={{ .namespace }}
{{- if .leaseDuration }}
- --leader-election-lease-duration={{ .leaseDuration }}
{{- end }}
{{- if .renewDeadline }}
- --leader-election-renew-deadline={{ .renewDeadline }}
{{- end }}
{{- if .retryPeriod }}
- --leader-election-retry-period={{ .retryPeriod }}
{{- end }}
{{- end }}
{{- with .Values.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.ingressShim }}
{{- if .defaultIssuerName }}
- --default-issuer-name={{ .defaultIssuerName }}
{{- end }}
{{- if .defaultIssuerKind }}
- --default-issuer-kind={{ .defaultIssuerKind }}
{{- end }}
{{- if .defaultIssuerGroup }}
- --default-issuer-group={{ .defaultIssuerGroup }}
{{- end }}
{{- end }}
{{- if .Values.featureGates }}
- --feature-gates={{ .Values.featureGates }}
{{- end }}
ports:
- containerPort: 9402
name: http-metrics
protocol: TCP
{{- with .Values.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.extraEnv }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.http_proxy }}
- name: HTTP_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.https_proxy }}
- name: HTTPS_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.no_proxy }}
- name: NO_PROXY
value: {{ . }}
{{- end }}
{{- with .Values.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podDnsPolicy }}
dnsPolicy: {{ . }}
{{- end }}
{{- with .Values.podDnsConfig }}
dnsConfig:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@ -0,0 +1,23 @@
{{- if .Values.webhook.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "webhook.fullname" . }}-allow-egress
namespace: {{ include "cert-manager.namespace" . }}
spec:
egress:
{{- with .Values.webhook.networkPolicy.egress }}
{{- toYaml . | nindent 2 }}
{{- end }}
podSelector:
matchLabels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Egress
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if .Values.webhook.networkPolicy.enabled }}
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: {{ template "webhook.fullname" . }}-allow-ingress
namespace: {{ include "cert-manager.namespace" . }}
spec:
ingress:
{{- with .Values.webhook.networkPolicy.ingress }}
{{- toYaml . | nindent 2 }}
{{- end }}
podSelector:
matchLabels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 6 }}
{{- end }}
policyTypes:
- Ingress
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "cert-manager.fullname" . }}-psp
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "cert-manager.fullname" . }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-psp
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}

View File

@ -0,0 +1,49 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "cert-manager.fullname" . }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
allowedCapabilities: [] # default set of capabilities are implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}

View File

@ -0,0 +1,545 @@
{{- if .Values.global.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "cert-manager.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
resourceNames: ["cert-manager-controller"]
verbs: ["get", "update", "patch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create"]
---
# grant cert-manager permission to manage the leaderelection configmap in the
# leader election namespace
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "cert-manager.fullname" . }}:leaderelection
namespace: {{ .Values.global.leaderElection.namespace }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "cert-manager.fullname" . }}:leaderelection
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
---
# Issuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-issuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "issuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# ClusterIssuer controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "clusterissuers/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Certificates controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificates
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificates/status", "certificaterequests", "certificaterequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["cert-manager.io"]
resources: ["certificates/finalizers", "certificaterequests/finalizers"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders"]
verbs: ["create", "delete", "get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "create", "update", "delete", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Orders controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-orders
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "orders/status"]
verbs: ["update", "patch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders", "challenges"]
verbs: ["get", "list", "watch"]
- apiGroups: ["cert-manager.io"]
resources: ["clusterissuers", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["create", "delete"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["orders/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
# Challenges controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-challenges
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
# Use to update challenge resource status
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "challenges/status"]
verbs: ["update", "patch"]
# Used to watch challenge resources
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges"]
verbs: ["get", "list", "watch"]
# Used to watch challenges, issuer and clusterissuer resources
- apiGroups: ["cert-manager.io"]
resources: ["issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
# Need to be able to retrieve ACME account private key to complete challenges
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
# Used to create events
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
# HTTP01 rules
- apiGroups: [""]
resources: ["pods", "services"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch", "create", "delete", "update"]
- apiGroups: [ "gateway.networking.k8s.io" ]
resources: [ "httproutes" ]
verbs: ["get", "list", "watch", "create", "delete", "update"]
# We require the ability to specify a custom hostname when we are creating
# new ingress resources.
# See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148
- apiGroups: ["route.openshift.io"]
resources: ["routes/custom-host"]
verbs: ["create"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges/finalizers"]
verbs: ["update"]
# DNS01 rules (duplicated above)
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
---
# ingress-shim controller role
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests"]
verbs: ["create", "update", "delete"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers", "clusterissuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses"]
verbs: ["get", "list", "watch"]
# We require these rules to support users with the OwnerReferencesPermissionEnforcement
# admission controller enabled:
# https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement
- apiGroups: ["networking.k8s.io"]
resources: ["ingresses/finalizers"]
verbs: ["update"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways", "httproutes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["gateway.networking.k8s.io"]
resources: ["gateways/finalizers", "httproutes/finalizers"]
verbs: ["update"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create", "patch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-issuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-issuers
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-clusterissuers
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificates
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-certificates
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-orders
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-orders
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-challenges
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-challenges
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-ingress-shim
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-view
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- if .Values.global.rbac.aggregateClusterRoles }}
rbac.authorization.k8s.io/aggregate-to-view: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["get", "list", "watch"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-edit
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- if .Values.global.rbac.aggregateClusterRoles }}
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-admin: "true"
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates", "certificaterequests", "issuers"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
- apiGroups: ["cert-manager.io"]
resources: ["certificates/status"]
verbs: ["update"]
- apiGroups: ["acme.cert-manager.io"]
resources: ["challenges", "orders"]
verbs: ["create", "delete", "deletecollection", "patch", "update"]
---
# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["signers"]
verbs: ["approve"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-approve:cert-manager-io
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
---
# Permission to:
# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers
# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["certificates.k8s.io"]
resources: ["certificatesigningrequests/status"]
verbs: ["update", "patch"]
- apiGroups: ["certificates.k8s.io"]
resources: ["signers"]
resourceNames: ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"]
verbs: ["sign"]
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "cert-manager"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "cert-manager.fullname" . }}-controller-certificatesigningrequests
subjects:
- name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
kind: ServiceAccount
{{- end }}

View File

@ -0,0 +1,31 @@
{{- if .Values.prometheus.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "cert-manager.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.serviceAnnotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: ClusterIP
ports:
- protocol: TCP
port: 9402
name: tcp-prometheus-servicemonitor
targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
selector:
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- end }}

View File

@ -0,0 +1,25 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "cert-manager.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
{{- with .Values.serviceAccount.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,45 @@
{{- if and .Values.prometheus.enabled .Values.prometheus.servicemonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "cert-manager.fullname" . }}
{{- if .Values.prometheus.servicemonitor.namespace }}
namespace: {{ .Values.prometheus.servicemonitor.namespace }}
{{- else }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
labels:
app: {{ include "cert-manager.name" . }}
app.kubernetes.io/name: {{ include "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- include "labels" . | nindent 4 }}
prometheus: {{ .Values.prometheus.servicemonitor.prometheusInstance }}
{{- with .Values.prometheus.servicemonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.prometheus.servicemonitor.annotations }}
annotations:
{{- with .Values.prometheus.servicemonitor.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
spec:
jobLabel: {{ template "cert-manager.fullname" . }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "cert-manager.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "controller"
{{- if .Values.prometheus.servicemonitor.namespace }}
namespaceSelector:
matchNames:
- {{ include "cert-manager.namespace" . }}
{{- end }}
endpoints:
- targetPort: {{ .Values.prometheus.servicemonitor.targetPort }}
path: {{ .Values.prometheus.servicemonitor.path }}
interval: {{ .Values.prometheus.servicemonitor.interval }}
scrapeTimeout: {{ .Values.prometheus.servicemonitor.scrapeTimeout }}
honorLabels: {{ .Values.prometheus.servicemonitor.honorLabels }}
{{- end }}
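# A minimal sketch of the values that render this template (both keys exist in
# this chart's values.yaml; both must be enabled):
# prometheus:
#   enabled: true
#   servicemonitor:
#     enabled: true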

View File

@ -0,0 +1,77 @@
{{- if .Values.startupapicheck.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "startupapicheck.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.jobAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
backoffLimit: {{ .Values.startupapicheck.backoffLimit }}
template:
metadata:
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 8 }}
{{- with .Values.startupapicheck.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
restartPolicy: OnFailure
serviceAccountName: {{ template "startupapicheck.serviceAccountName" . }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.startupapicheck.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: {{ .Chart.Name }}-startupapicheck
{{- with .Values.startupapicheck.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.startupapicheck.image.pullPolicy }}
args:
- check
- api
- --wait={{ .Values.startupapicheck.timeout }}
{{- with .Values.startupapicheck.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- with .Values.startupapicheck.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.startupapicheck.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.startupapicheck.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.startupapicheck.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "startupapicheck.fullname" . }}-psp
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "startupapicheck.fullname" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "startupapicheck.fullname" . }}-psp
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "startupapicheck.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,51 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "startupapicheck.fullname" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
{{- with .Values.startupapicheck.rbac.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'projected'
- 'secret'
hostNetwork: false
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}
{{- end }}

View File

@ -0,0 +1,48 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.global.rbac.create }}
# create certificate role
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "startupapicheck.fullname" . }}:create-cert
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
rules:
- apiGroups: ["cert-manager.io"]
resources: ["certificates"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "startupapicheck.fullname" . }}:create-cert
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.rbac.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "startupapicheck.fullname" . }}:create-cert
subjects:
- kind: ServiceAccount
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,27 @@
{{- if .Values.startupapicheck.enabled }}
{{- if .Values.startupapicheck.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.startupapicheck.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "startupapicheck.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.startupapicheck.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "startupapicheck.name" . }}
app.kubernetes.io/name: {{ include "startupapicheck.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "startupapicheck"
{{- include "labels" . | nindent 4 }}
{{- with .Values.startupapicheck.serviceAccount.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,24 @@
{{- if .Values.webhook.config -}}
{{- if not .Values.webhook.config.apiVersion -}}
{{- fail "webhook.config.apiVersion must be set" -}}
{{- end -}}
{{- if not .Values.webhook.config.kind -}}
{{- fail "webhook.config.kind must be set" -}}
{{- end -}}
{{- end -}}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
data:
{{- if .Values.webhook.config }}
config.yaml: |
{{ .Values.webhook.config | toYaml | nindent 4 }}
{{- end }}

View File

@ -0,0 +1,172 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
{{- with .Values.webhook.deploymentAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
replicas: {{ .Values.webhook.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- with .Values.webhook.strategy }}
strategy:
{{- toYaml . | nindent 4 }}
{{- end }}
template:
metadata:
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 8 }}
{{- with .Values.webhook.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
serviceAccountName: {{ template "webhook.serviceAccountName" . }}
{{- if hasKey .Values.webhook "automountServiceAccountToken" }}
automountServiceAccountToken: {{ .Values.webhook.automountServiceAccountToken }}
{{- end }}
{{- with .Values.global.priorityClassName }}
priorityClassName: {{ . | quote }}
{{- end }}
{{- with .Values.webhook.securityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.webhook.hostNetwork }}
hostNetwork: true
{{- end }}
containers:
- name: {{ .Chart.Name }}-webhook
{{- with .Values.webhook.image }}
image: "{{- if .registry -}}{{ .registry }}/{{- end -}}{{ .repository }}{{- if (.digest) -}} @{{ .digest }}{{- else -}}:{{ default $.Chart.AppVersion .tag }} {{- end -}}"
{{- end }}
imagePullPolicy: {{ .Values.webhook.image.pullPolicy }}
args:
{{- if .Values.global.logLevel }}
- --v={{ .Values.global.logLevel }}
{{- end }}
{{- if .Values.webhook.config }}
- --config=/var/cert-manager/config/config.yaml
{{- end }}
{{- $config := default .Values.webhook.config "" }}
{{ if not $config.securePort -}}
- --secure-port={{ .Values.webhook.securePort }}
{{- end }}
{{- $tlsConfig := default $config.tlsConfig "" }}
{{ if or (not $config.tlsConfig) (and (not $tlsConfig.dynamic) (not $tlsConfig.filesystem) ) -}}
- --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE)
- --dynamic-serving-ca-secret-name={{ template "webhook.fullname" . }}-ca
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE)
- --dynamic-serving-dns-names={{ template "webhook.fullname" . }}.$(POD_NAMESPACE).svc
{{ if .Values.webhook.url.host }}
- --dynamic-serving-dns-names={{ .Values.webhook.url.host }}
{{- end }}
{{- end }}
{{- with .Values.webhook.extraArgs }}
{{- toYaml . | nindent 10 }}
{{- end }}
ports:
- name: https
protocol: TCP
{{- if $config.securePort }}
containerPort: {{ $config.securePort }}
{{- else if .Values.webhook.securePort }}
containerPort: {{ .Values.webhook.securePort }}
{{- else }}
containerPort: 6443
{{- end }}
- name: healthcheck
protocol: TCP
{{- if $config.healthzPort }}
containerPort: {{ $config.healthzPort }}
{{- else }}
containerPort: 6080
{{- end }}
livenessProbe:
httpGet:
path: /livez
{{- if $config.healthzPort }}
port: {{ $config.healthzPort }}
{{- else }}
port: 6080
{{- end }}
scheme: HTTP
initialDelaySeconds: {{ .Values.webhook.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.webhook.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.webhook.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.webhook.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.webhook.livenessProbe.failureThreshold }}
readinessProbe:
httpGet:
path: /healthz
{{- if $config.healthzPort }}
port: {{ $config.healthzPort }}
{{- else }}
port: 6080
{{- end }}
scheme: HTTP
initialDelaySeconds: {{ .Values.webhook.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.webhook.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.webhook.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.webhook.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.webhook.readinessProbe.failureThreshold }}
{{- with .Values.webhook.containerSecurityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.webhook.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.webhook.config }}
volumeMounts:
- name: config
mountPath: /var/cert-manager/config
{{- end }}
{{- with .Values.webhook.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.webhook.topologySpreadConstraints }}
topologySpreadConstraints:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.webhook.config }}
volumes:
- name: config
configMap:
name: {{ include "webhook.fullname" . }}
{{- end }}

View File

@ -0,0 +1,46 @@
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: {{ include "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
annotations:
cert-manager.io/inject-ca-from-secret: {{ printf "%s/%s-ca" (include "cert-manager.namespace" .) (include "webhook.fullname" .) | quote }}
{{- with .Values.webhook.mutatingWebhookConfigurationAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
webhooks:
- name: webhook.cert-manager.io
rules:
- apiGroups:
- "cert-manager.io"
- "acme.cert-manager.io"
apiVersions:
- "v1"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
# This webhook only accepts v1 cert-manager resources.
# Equivalent matchPolicy ensures that non-v1 resource requests are sent to
# this webhook (after the resources have been converted to v1).
matchPolicy: Equivalent
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
failurePolicy: Fail
# Only include 'sideEffects' field in Kubernetes 1.12+
sideEffects: None
clientConfig:
{{- if .Values.webhook.url.host }}
url: https://{{ .Values.webhook.url.host }}/mutate
{{- else }}
service:
name: {{ template "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
path: /mutate
{{- end }}

View File

@ -0,0 +1,18 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "webhook.fullname" . }}-psp
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ['policy']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames:
- {{ template "webhook.fullname" . }}
{{- end }}

View File

@ -0,0 +1,20 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "webhook.fullname" . }}-psp
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "webhook.fullname" . }}-psp
subjects:
- kind: ServiceAccount
name: {{ template "webhook.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}

View File

@ -0,0 +1,54 @@
{{- if .Values.global.podSecurityPolicy.enabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: {{ template "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default'
seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
{{- if .Values.global.podSecurityPolicy.useAppArmor }}
apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
{{- end }}
spec:
privileged: false
allowPrivilegeEscalation: false
allowedCapabilities: [] # the default set of capabilities is implicitly allowed
volumes:
- 'configMap'
- 'emptyDir'
- 'projected'
- 'secret'
- 'downwardAPI'
hostNetwork: {{ .Values.webhook.hostNetwork }}
{{- if .Values.webhook.hostNetwork }}
hostPorts:
- max: {{ .Values.webhook.securePort }}
min: {{ .Values.webhook.securePort }}
{{- end }}
hostIPC: false
hostPID: false
runAsUser:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
seLinux:
rule: 'RunAsAny'
supplementalGroups:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
fsGroup:
rule: 'MustRunAs'
ranges:
- min: 1000
max: 1000
{{- end }}

View File

@ -0,0 +1,83 @@
{{- if .Values.global.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "webhook.fullname" . }}:dynamic-serving
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: [""]
resources: ["secrets"]
resourceNames:
- '{{ template "webhook.fullname" . }}-ca'
verbs: ["get", "list", "watch", "update"]
# It's not possible to grant CREATE permission on a single resourceName.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ template "webhook.fullname" . }}:dynamic-serving
namespace: {{ include "cert-manager.namespace" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "webhook.fullname" . }}:dynamic-serving
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ template "webhook.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "webhook.fullname" . }}:subjectaccessreviews
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
rules:
- apiGroups: ["authorization.k8s.io"]
resources: ["subjectaccessreviews"]
verbs: ["create"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: {{ template "webhook.fullname" . }}:subjectaccessreviews
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "webhook.fullname" . }}:subjectaccessreviews
subjects:
- apiGroup: ""
kind: ServiceAccount
name: {{ template "webhook.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- end }}

View File

@ -0,0 +1,32 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.webhook.serviceAnnotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
{{- with .Values.webhook.serviceLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
type: {{ .Values.webhook.serviceType }}
{{- with .Values.webhook.loadBalancerIP }}
loadBalancerIP: {{ . }}
{{- end }}
ports:
- name: https
port: 443
protocol: TCP
targetPort: "https"
selector:
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"

View File

@ -0,0 +1,25 @@
{{- if .Values.webhook.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
automountServiceAccountToken: {{ .Values.webhook.serviceAccount.automountServiceAccountToken }}
metadata:
name: {{ template "webhook.serviceAccountName" . }}
namespace: {{ include "cert-manager.namespace" . }}
{{- with .Values.webhook.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
{{- with .Values.webhook.serviceAccount.labels }}
{{ toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 2 }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,55 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: {{ include "webhook.fullname" . }}
labels:
app: {{ include "webhook.name" . }}
app.kubernetes.io/name: {{ include "webhook.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: "webhook"
{{- include "labels" . | nindent 4 }}
annotations:
cert-manager.io/inject-ca-from-secret: {{ printf "%s/%s-ca" (include "cert-manager.namespace" .) (include "webhook.fullname" .) | quote }}
{{- with .Values.webhook.validatingWebhookConfigurationAnnotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
webhooks:
- name: webhook.cert-manager.io
namespaceSelector:
matchExpressions:
- key: "cert-manager.io/disable-validation"
operator: "NotIn"
values:
- "true"
- key: "name"
operator: "NotIn"
values:
- {{ include "cert-manager.namespace" . }}
rules:
- apiGroups:
- "cert-manager.io"
- "acme.cert-manager.io"
apiVersions:
- "v1"
operations:
- CREATE
- UPDATE
resources:
- "*/*"
admissionReviewVersions: ["v1"]
# This webhook only accepts v1 cert-manager resources.
# Equivalent matchPolicy ensures that non-v1 resource requests are sent to
# this webhook (after the resources have been converted to v1).
matchPolicy: Equivalent
timeoutSeconds: {{ .Values.webhook.timeoutSeconds }}
failurePolicy: Fail
sideEffects: None
clientConfig:
{{- if .Values.webhook.url.host }}
url: https://{{ .Values.webhook.url.host }}/validate
{{- else }}
service:
name: {{ template "webhook.fullname" . }}
namespace: {{ include "cert-manager.namespace" . }}
path: /validate
{{- end }}

View File

@ -0,0 +1,602 @@
# Default values for cert-manager.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
global:
# Reference to one or more secrets to be used when pulling images
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
# - name: "image-pull-secret"
# Labels to apply to all resources
# Please note that this does not add labels to the resources created dynamically by the controllers.
# For these resources, you have to add the labels in the template in the cert-manager custom resource:
# e.g. podTemplate/ingressTemplate in ACMEChallengeSolverHTTP01Ingress
# ref: https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress
# e.g. secretTemplate in CertificateSpec
# ref: https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
commonLabels: {}
# team_name: dev
# Optional priority class to be used for the cert-manager pods
priorityClassName: ""
rbac:
create: true
# Aggregate ClusterRoles to Kubernetes default user-facing roles. Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles
aggregateClusterRoles: true
podSecurityPolicy:
enabled: false
useAppArmor: true
# Set the verbosity of cert-manager. Range of 0 - 6 with 6 being the most verbose.
logLevel: 2
leaderElection:
# Override the namespace used for the leader election lease
namespace: "kube-system"
# The duration that non-leader candidates will wait after observing a
# leadership renewal until attempting to acquire leadership of a led but
# unrenewed leader slot. This is effectively the maximum duration that a
# leader can be stopped before it is replaced by another candidate.
# leaseDuration: 60s
# The interval between attempts by the acting master to renew a leadership
# slot before it stops leading. This must be less than or equal to the
# lease duration.
# renewDeadline: 40s
# The duration the clients should wait between attempting acquisition and
# renewal of a leadership.
# retryPeriod: 15s
installCRDs: false
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Comma separated list of feature gates that should be enabled on the
# controller pod.
featureGates: ""
image:
repository: quay.io/jetstack/cert-manager-controller
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-controller
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer
# resources. By default, the same namespace as cert-manager is deployed within is
# used. This namespace will not be automatically created by the Helm chart.
clusterResourceNamespace: ""
# This namespace allows you to define where the services will be installed;
# if not set, they will use the namespace of the release.
# This is helpful when installing cert-manager as a chart dependency (sub-chart).
namespace: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the controller's ServiceAccount
# annotations: {}
# Optional additional labels to add to the controller's ServiceAccount
# labels: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Automounting API credentials for a particular pod
# automountServiceAccountToken: true
# Additional command line flags to pass to cert-manager controller binary.
# To see all available flags run docker run quay.io/jetstack/cert-manager-controller:<version> --help
extraArgs: []
# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted
# - --enable-certificate-owner-ref=true
# Use this flag to enable or disable arbitrary controllers, for example, disable the CertificateRequests approver
# - --controllers=*,-certificaterequests-approver
extraEnv: []
# - name: SOME_VAR
# value: 'some value'
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
# Pod Security Context
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
volumes: []
volumeMounts: []
# Optional additional annotations to add to the controller Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the controller Pods
# podAnnotations: {}
podLabels: {}
# Optional annotations to add to the controller Service
# serviceAnnotations: {}
# Optional additional labels to add to the controller Service
# serviceLabels: {}
# Optional DNS settings, useful if you have a public and private DNS zone for
# the same domain on Route 53. What follows is an example of ensuring
# cert-manager can access an ingress or DNS TXT records at all times.
# NOTE: This requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for
# the cluster to work.
# podDnsPolicy: "None"
# podDnsConfig:
# nameservers:
# - "1.1.1.1"
# - "8.8.8.8"
nodeSelector:
kubernetes.io/os: linux
ingressShim: {}
# defaultIssuerName: ""
# defaultIssuerKind: ""
# defaultIssuerGroup: ""
prometheus:
enabled: true
servicemonitor:
enabled: false
prometheusInstance: default
targetPort: 9402
path: /metrics
interval: 60s
scrapeTimeout: 30s
labels: {}
annotations: {}
honorLabels: false
# Use these variables to configure the HTTP_PROXY environment variables
# http_proxy: "http://proxy:8080"
# https_proxy: "https://proxy:8080"
# no_proxy: 127.0.0.1,localhost
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#affinity-v1-core
# for example:
# affinity:
# nodeAffinity:
# requiredDuringSchedulingIgnoredDuringExecution:
# nodeSelectorTerms:
# - matchExpressions:
# - key: foo.bar.com/role
# operator: In
# values:
# - master
affinity: {}
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#toleration-v1-core
# for example:
# tolerations:
# - key: foo.bar.com/role
# operator: Equal
# value: master
# effect: NoSchedule
tolerations: []
# expects input structure as per specification https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core
# for example:
# topologySpreadConstraints:
# - maxSkew: 2
# topologyKey: topology.kubernetes.io/zone
# whenUnsatisfiable: ScheduleAnyway
# labelSelector:
# matchLabels:
# app.kubernetes.io/instance: cert-manager
# app.kubernetes.io/component: controller
topologySpreadConstraints: []
webhook:
replicaCount: 1
timeoutSeconds: 10
# Used to configure options for the webhook pod.
# This allows setting options that'd usually be provided via flags.
# An APIVersion and Kind must be specified in your values.yaml file.
# Flags will override options that are set here.
config:
# apiVersion: webhook.config.cert-manager.io/v1alpha1
# kind: WebhookConfiguration
# The port that the webhook should listen on for requests.
# In GKE private clusters, by default Kubernetes apiservers are allowed to
# talk to the cluster nodes only on ports 443 and 10250, so configuring
# securePort: 10250 will work out of the box without needing to add firewall
# rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000.
# This should be uncommented and set as a default by the chart once we graduate
# the apiVersion of WebhookConfiguration past v1alpha1.
# securePort: 10250
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the webhook component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# Container Security Context to be set on the webhook component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the webhook Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the webhook Pods
# podAnnotations: {}
# Optional additional annotations to add to the webhook Service
# serviceAnnotations: {}
# Optional additional annotations to add to the webhook MutatingWebhookConfiguration
# mutatingWebhookConfigurationAnnotations: {}
# Optional additional annotations to add to the webhook ValidatingWebhookConfiguration
# validatingWebhookConfigurationAnnotations: {}
# Additional command line flags to pass to cert-manager webhook binary.
# To see all available flags run docker run quay.io/jetstack/cert-manager-webhook:<version> --help
extraArgs: []
# Path to a file containing a WebhookConfiguration object used to configure the webhook
# - --config=<path-to-config-file>
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
## Liveness and readiness probe values
## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes
##
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 60
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
nodeSelector:
kubernetes.io/os: linux
affinity: {}
tolerations: []
topologySpreadConstraints: []
# Optional additional labels to add to the Webhook Pods
podLabels: {}
# Optional additional labels to add to the Webhook Service
serviceLabels: {}
image:
repository: quay.io/jetstack/cert-manager-webhook
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-webhook
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the webhook's ServiceAccount
# annotations: {}
# Optional additional labels to add to the webhook's ServiceAccount
# labels: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Automounting API credentials for a particular pod
# automountServiceAccountToken: true
# The port that the webhook should listen on for requests.
# In GKE private clusters, by default Kubernetes apiservers are allowed to
# talk to the cluster nodes only on ports 443 and 10250, so configuring
# securePort: 10250 will work out of the box without needing to add firewall
# rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000.
securePort: 10250
# Specifies if the webhook should be started in hostNetwork mode.
#
# Required for use in some managed Kubernetes clusters (such as AWS EKS) with a custom
# CNI (such as Calico), because the control plane managed by AWS cannot communicate
# with the pods' IP CIDR, so admission webhooks do not work.
#
# Since the default port for the webhook conflicts with kubelet on the host
# network, `webhook.securePort` should be changed to an available port if
# running in hostNetwork mode.
hostNetwork: false
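# A hypothetical override for such a cluster (the port is illustrative; any
# free port that does not collide with the kubelet's 10250 works):
# hostNetwork: true
# securePort: 10260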
# Specifies how the service should be handled. Useful if you want to expose the
# webhook outside of the cluster. In some cases, the control plane cannot
# reach internal services.
serviceType: ClusterIP
# loadBalancerIP:
# Overrides the mutating webhook and validating webhook so they reach the webhook
# service using the `url` field instead of a service.
url: {}
# host:
# Enables default network policies for webhooks.
networkPolicy:
enabled: false
ingress:
- from:
- ipBlock:
cidr: 0.0.0.0/0
egress:
- ports:
- port: 80
protocol: TCP
- port: 443
protocol: TCP
- port: 53
protocol: TCP
- port: 53
protocol: UDP
to:
- ipBlock:
cidr: 0.0.0.0/0
cainjector:
enabled: true
replicaCount: 1
strategy: {}
# type: RollingUpdate
# rollingUpdate:
# maxSurge: 0
# maxUnavailable: 1
# Pod Security Context to be set on the cainjector component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# Container Security Context to be set on the cainjector component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Optional additional annotations to add to the cainjector Deployment
# deploymentAnnotations: {}
# Optional additional annotations to add to the cainjector Pods
# podAnnotations: {}
# Additional command line flags to pass to cert-manager cainjector binary.
# To see all available flags run docker run quay.io/jetstack/cert-manager-cainjector:<version> --help
extraArgs: []
# Enable profiling for cainjector
# - --enable-profiling=true
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector:
kubernetes.io/os: linux
affinity: {}
tolerations: []
topologySpreadConstraints: []
# Optional additional labels to add to the CA Injector Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-cainjector
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-cainjector
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the cainjector's ServiceAccount
# annotations: {}
# Optional additional labels to add to the cainjector's ServiceAccount
# labels: {}
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Automounting API credentials for a particular pod
# automountServiceAccountToken: true
# This startupapicheck is a Helm post-install hook that waits for the webhook
# endpoints to become available.
# The check is implemented using a Kubernetes Job; if you are injecting mesh
# sidecar proxies into cert-manager pods, you probably want to ensure that they
# are not injected into this Job's pod. Otherwise the installation may time out
# due to the Job never being completed because the sidecar proxy does not exit.
# See https://github.com/cert-manager/cert-manager/pull/4414 for context.
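# A sketch of one way to keep mesh sidecars out of this Job's pod (the
# annotation key is assumed from Istio's conventions; other meshes use their
# own keys):
# startupapicheck:
#   podAnnotations:
#     sidecar.istio.io/inject: "false"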
startupapicheck:
enabled: true
# Pod Security Context to be set on the startupapicheck component Pod
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# Container Security Context to be set on the controller component container
# ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
containerSecurityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# Timeout for the startupapicheck 'check api' command
timeout: 1m
# Job backoffLimit
backoffLimit: 4
# Optional additional annotations to add to the startupapicheck Job
jobAnnotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "1"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Optional additional annotations to add to the startupapicheck Pods
# podAnnotations: {}
# Additional command line flags to pass to startupapicheck binary.
# To see all available flags run docker run quay.io/jetstack/cert-manager-ctl:<version> --help
extraArgs: []
resources: {}
# requests:
# cpu: 10m
# memory: 32Mi
nodeSelector:
kubernetes.io/os: linux
affinity: {}
tolerations: []
# Optional additional labels to add to the startupapicheck Pods
podLabels: {}
image:
repository: quay.io/jetstack/cert-manager-ctl
# You can manage a registry with
# registry: quay.io
# repository: jetstack/cert-manager-ctl
# Override the image tag to deploy by setting this variable.
# If no value is set, the chart's appVersion will be used.
# tag: canary
# Setting a digest will override any tag
# digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20
pullPolicy: IfNotPresent
rbac:
# annotations for the startup API Check job RBAC and PSP resources
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
serviceAccount:
# Specifies whether a service account should be created
create: true
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
# name: ""
# Optional additional annotations to add to the Job's ServiceAccount
annotations:
helm.sh/hook: post-install
helm.sh/hook-weight: "-5"
helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
# Automount API credentials for a Service Account.
automountServiceAccountToken: true
# Optional additional labels to add to the startupapicheck's ServiceAccount
# labels: {}

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,17 @@
apiVersion: v2
name: constellation-operators
description: A Helm chart for Kubernetes
type: application
version: 2.3.0-pre
dependencies:
- name: node-maintenance-operator
version: 2.3.0-pre
tags:
- Azure
- GCP
- name: constellation-operator
version: 2.3.0-pre
tags:
- Azure
- GCP
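# The tags above let an installer toggle both operator subcharts per cloud
# provider at deploy time; a sketch of the values such an install might pass
# (the flag values are illustrative):
# tags:
#   Azure: true
#   GCP: false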

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,5 @@
apiVersion: v2
name: constellation-operator
description: A Helm chart for Kubernetes
type: application
version: 2.3.0-pre

View File

@ -0,0 +1,81 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: autoscalingstrategies.update.edgeless.systems
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
spec:
group: update.edgeless.systems
names:
kind: AutoscalingStrategy
listKind: AutoscalingStrategyList
plural: autoscalingstrategies
singular: autoscalingstrategy
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: AutoscalingStrategy is the Schema for the autoscalingstrategies
API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AutoscalingStrategySpec defines the desired state of AutoscalingStrategy.
properties:
autoscalerExtraArgs:
additionalProperties:
type: string
description: AutoscalerExtraArgs defines extra arguments to be passed
to the autoscaler.
type: object
deploymentName:
description: DeploymentName defines the name of the autoscaler deployment.
type: string
deploymentNamespace:
description: DeploymentNamespace defines the namespace of the autoscaler
deployment.
type: string
enabled:
description: Enabled defines whether cluster autoscaling should be enabled
or not.
type: boolean
required:
- deploymentName
- deploymentNamespace
- enabled
type: object
status:
description: AutoscalingStrategyStatus defines the observed state of AutoscalingStrategy.
properties:
enabled:
description: Enabled shows whether cluster autoscaling is currently
enabled or not.
type: boolean
replicas:
description: Replicas is the number of replicas for the autoscaler deployment.
format: int32
type: integer
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
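# A minimal example of a resource this CRD admits (all field values are
# illustrative, not taken from the chart):
# apiVersion: update.edgeless.systems/v1alpha1
# kind: AutoscalingStrategy
# metadata:
#   name: autoscalingstrategy-sample
# spec:
#   enabled: true
#   deploymentName: constellation-cluster-autoscaler
#   deploymentNamespace: kube-system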

View File

@ -0,0 +1,636 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: nodeimages.update.edgeless.systems
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
spec:
group: update.edgeless.systems
names:
kind: NodeImage
listKind: NodeImageList
plural: nodeimages
singular: nodeimage
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: NodeImage is the Schema for the nodeimages API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NodeImageSpec defines the desired state of NodeImage.
properties:
image:
description: ImageReference is the image to use for all nodes.
type: string
type: object
status:
description: NodeImageStatus defines the observed state of NodeImage.
properties:
budget:
description: Budget is the number of extra nodes that can be created
as replacements for outdated nodes.
format: int32
type: integer
conditions:
description: Conditions represent the latest available observations
of an object's state
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a foo's
current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details
about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers of
specific condition types may define expected values and meanings
for this field, and whether the values are considered a guaranteed
API. The value should be a CamelCase string. This field may
not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
donors:
description: Donors is a list of outdated nodes that donate labels to
heirs.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
heirs:
description: Heirs is a list of nodes using the latest image that still
need to inherit labels from donors.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
invalid:
description: Invalid is a list of invalid nodes (nodes that cannot be
processed by the operator due to missing information or transient
faults).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
mints:
description: Mints is a list of up-to-date nodes that will become heirs.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
obsolete:
description: Obsolete is a list of obsolete nodes (nodes that have been
created by the operator but are no longer needed).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
outdated:
description: Outdated is a list of nodes that are using an outdated
image.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
pending:
description: Pending is a list of pending nodes (joining or leaving
the cluster).
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
upToDate:
description: UpToDate is a list of nodes that are using the latest image
and labels.
items:
description: "ObjectReference contains enough information to let you
inspect or modify the referred object. --- New uses of this type
are discouraged because of difficulty describing its usage when
embedded in APIs. 1. Ignored fields. It includes many fields which
are not generally honored. For instance, ResourceVersion and FieldPath
are both very rarely valid in actual usage. 2. Invalid usage help.
\ It is impossible to add specific help for individual usage. In
most embedded usages, there are particular restrictions like, \"must
refer only to types A and B\" or \"UID not honored\" or \"name must
be restricted\". Those cannot be well described when embedded. 3.
Inconsistent validation. Because the usages are different, the
validation rules are different by usage, which makes it hard for
users to predict what will happen. 4. The fields are both imprecise
and overly precise. Kind is not a precise mapping to a URL. This
can produce ambiguity during interpretation and require a REST mapping.
\ In most cases, the dependency is on the group,resource tuple and
the version of the actual struct is irrelevant. 5. We cannot easily
change it. Because this type is embedded in many locations, updates
to this type will affect numerous schemas. Don't make new APIs
embed an underspecified API type they do not control. \n Instead
of using this type, create a locally provided and used type that
is well-focused on your reference. For example, ServiceReferences
for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533
."
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
type: array
required:
- budget
- conditions
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
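A minimal NodeImage object for this CRD might look like this; the image reference is illustrative only, since its exact format is CSP-specific:

apiVersion: update.edgeless.systems/v1alpha1
kind: NodeImage
metadata:
  name: constellation-os
spec:
  image: projects/example-project/global/images/constellation-example  # illustrative GCP-style reference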

View File

@ -0,0 +1,88 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: pendingnodes.update.edgeless.systems
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
spec:
group: update.edgeless.systems
names:
kind: PendingNode
listKind: PendingNodeList
plural: pendingnodes
singular: pendingnode
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: PendingNode is the Schema for the pendingnodes API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: PendingNodeSpec defines the desired state of PendingNode.
properties:
deadline:
description: Deadline is the deadline for reaching the goal state. Joining
nodes will be terminated if the deadline is exceeded. Leaving nodes
will remain unschedulable to prevent data loss. If not specified,
the node may remain in the pending state indefinitely.
format: date-time
type: string
goal:
description: Goal is the goal of the pending state.
enum:
- Join
- Leave
type: string
groupID:
description: ScalingGroupID is the ID of the group that this node shall
be part of.
type: string
nodeName:
description: NodeName is the Kubernetes internal name of the node.
type: string
providerID:
description: ProviderID is the provider ID of the node.
type: string
type: object
status:
description: PendingNodeStatus defines the observed state of PendingNode.
properties:
cspState:
description: CSPNodeState is the state of the node in the cloud.
enum:
- Unknown
- Creating
- Ready
- Stopped
- Terminating
- Terminated
- Failed
type: string
reachedGoal:
description: ReachedGoal is true if the node has reached the goal state.
type: boolean
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
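A sketch of a PendingNode tracking a joining node, with placeholder identifiers throughout:

apiVersion: update.edgeless.systems/v1alpha1
kind: PendingNode
metadata:
  name: pendingnode-sample
spec:
  goal: Join
  groupID: example-worker-group                        # placeholder scaling group ID
  nodeName: worker-node-1                              # placeholder
  providerID: gce://example-project/zone-a/instance-1  # placeholder
  deadline: "2022-11-21T12:00:00Z"                     # node is terminated if Join is not reached by then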

View File

@ -0,0 +1,157 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: scalinggroups.update.edgeless.systems
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
spec:
group: update.edgeless.systems
names:
kind: ScalingGroup
listKind: ScalingGroupList
plural: scalinggroups
singular: scalinggroup
scope: Cluster
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: ScalingGroup is the Schema for the scalinggroups API.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ScalingGroupSpec defines the desired state of ScalingGroup.
properties:
autoscalerGroupName:
description: AutoscalerGroupName is the name that is expected by the autoscaler.
type: string
autoscaling:
description: Autoscaling specifies whether the scaling group should automatically
scale using the cluster-autoscaler.
type: boolean
groupId:
description: GroupID is the CSP-specific, canonical identifier of a
scaling group.
type: string
max:
description: Max is the maximum number of autoscaled nodes in the scaling
group (used by cluster-autoscaler).
format: int32
type: integer
min:
description: Min is the minimum number of nodes in the scaling group
(used by cluster-autoscaler).
format: int32
type: integer
nodeImage:
description: NodeImage is the name of the NodeImage resource.
type: string
role:
description: Role is the role of the nodes in the scaling group.
enum:
- Worker
- ControlPlane
type: string
type: object
status:
description: ScalingGroupStatus defines the observed state of ScalingGroup.
properties:
conditions:
description: Conditions represent the latest available observations
of an object's state.
items:
description: "Condition contains details for one aspect of the current
state of this API Resource. --- This struct is intended for direct
use as an array at the field path .status.conditions. For example,
\n type FooStatus struct{ // Represents the observations of a foo's
current state. // Known .status.conditions.type are: \"Available\",
\"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge
// +listType=map // +listMapKey=type Conditions []metav1.Condition
`json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\"
protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }"
properties:
lastTransitionTime:
description: lastTransitionTime is the last time the condition
transitioned from one status to another. This should be when
the underlying condition changed. If that is not known, then
using the time when the API field changed is acceptable.
format: date-time
type: string
message:
description: message is a human readable message indicating details
about the transition. This may be an empty string.
maxLength: 32768
type: string
observedGeneration:
description: observedGeneration represents the .metadata.generation
that the condition was set based upon. For instance, if .metadata.generation
is currently 12, but the .status.conditions[x].observedGeneration
is 9, the condition is out of date with respect to the current
state of the instance.
format: int64
minimum: 0
type: integer
reason:
description: reason contains a programmatic identifier indicating
the reason for the condition's last transition. Producers of
specific condition types may define expected values and meanings
for this field, and whether the values are considered a guaranteed
API. The value should be a CamelCase string. This field may
not be empty.
maxLength: 1024
minLength: 1
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
type: string
status:
description: status of the condition, one of True, False, Unknown.
enum:
- "True"
- "False"
- Unknown
type: string
type:
description: type of condition in CamelCase or in foo.example.com/CamelCase.
--- Many .condition.type values are consistent across resources
like Available, but because arbitrary conditions can be useful
(see .node.status.conditions), the ability to deconflict is
important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
maxLength: 316
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
type: string
required:
- lastTransitionTime
- message
- reason
- status
- type
type: object
type: array
imageReference:
description: ImageReference is the image currently used for newly created
nodes in this scaling group.
type: string
required:
- conditions
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
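For reference, a ScalingGroup object matching this schema could be written as follows (all identifiers are placeholders):

apiVersion: update.edgeless.systems/v1alpha1
kind: ScalingGroup
metadata:
  name: scalinggroup-worker
spec:
  nodeImage: constellation-os        # name of the NodeImage resource
  groupId: example-worker-group      # placeholder CSP-specific group identifier
  autoscalerGroupName: worker-group  # placeholder name expected by the autoscaler
  autoscaling: true
  min: 1
  max: 10
  role: Worker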

View File

@ -0,0 +1,33 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "chart.labels" -}}
helm.sh/chart: {{ include "chart.chart" . }}
{{ include "chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
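Assuming a chart named constellation-operator at version 2.3.0-pre, released under the name constellation-operator with no appVersion set, these helpers would render roughly:

helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: constellation-operator
app.kubernetes.io/managed-by: Helm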

View File

@ -0,0 +1,113 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: constellation-operator-controller-manager
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: constellation-operator-controller-manager
namespace: {{ .Release.Namespace }}
labels:
control-plane: controller-manager
{{- include "chart.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.controllerManager.replicas }}
selector:
matchLabels:
control-plane: controller-manager
{{- include "chart.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
control-plane: controller-manager
{{- include "chart.selectorLabels" . | nindent 8 }}
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: {{ .Values.kubernetesClusterDomain }}
image: {{ .Values.controllerManager.kubeRbacProxy.image.repository }}:{{ .Values.controllerManager.kubeRbacProxy.image.tag
| default .Chart.AppVersion }}
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources: {{- toYaml .Values.controllerManager.kubeRbacProxy.resources | nindent
10 }}
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
command:
- /manager
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: {{ .Values.kubernetesClusterDomain }}
- name: CONSTEL_CSP
value: {{ .Values.csp }}
- name: constellation-uid
value: {{ .Values.constellationUID }}
image: {{ .Values.controllerManager.manager.image }}
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10 }}
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /etc/kubernetes/pki/etcd
name: etcd-certs
- mountPath: /etc/azure
name: azureconfig
readOnly: true
- mountPath: /etc/gce
name: gceconf
readOnly: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
securityContext:
runAsUser: 0
serviceAccountName: constellation-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- hostPath:
path: /etc/kubernetes/pki/etcd
type: Directory
name: etcd-certs
- name: azureconfig
secret:
optional: true
secretName: azureconfig
- configMap:
name: gceconf
optional: true
name: gceconf

View File

@ -0,0 +1,55 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: constellation-operator-leader-election-role
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: constellation-operator-leader-election-rolebinding
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'constellation-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: constellation-operator-manager-config
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
health:
healthProbeBindAddress: {{ .Values.managerConfig.controllerManagerConfigYaml.health.healthProbeBindAddress
| quote }}
kind: ControllerManagerConfig
leaderElection:
leaderElect: {{ .Values.managerConfig.controllerManagerConfigYaml.leaderElection.leaderElect
}}
resourceName: {{ .Values.managerConfig.controllerManagerConfigYaml.leaderElection.resourceName
| quote }}
metrics:
bindAddress: {{ .Values.managerConfig.controllerManagerConfigYaml.metrics.bindAddress
| quote }}
webhook:
port: {{ .Values.managerConfig.controllerManagerConfigYaml.webhook.port }}

View File

@ -0,0 +1,177 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-manager-role
labels:
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
verbs:
- get
- list
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-manager-rolebinding
labels:
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-metrics-reader
labels:
{{- include "chart.labels" . | nindent 4 }}
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: constellation-operator-controller-manager-metrics-service
namespace: {{ .Release.Namespace }}
labels:
control-plane: controller-manager
{{- include "chart.labels" . | nindent 4 }}
spec:
type: {{ .Values.metricsService.type }}
selector:
control-plane: controller-manager
{{- include "chart.selectorLabels" . | nindent 4 }}
ports:
{{- .Values.metricsService.ports | toYaml | nindent 2 -}}

View File

@ -0,0 +1,36 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-proxy-role
labels:
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-proxy-rolebinding
labels:
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,43 @@
{
"$schema": "https://json-schema.org/draft-07/schema#",
"properties": {
"controllerManager": {
"description": "Configuration of the operator's controller-manager deployment.",
"type": "object",
"properties": {
"manager": {
"type": "object",
"properties": {
"image": {
"description": "Container image to use for the operator's manager container.",
"type": "string",
"examples": ["k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.1"]
}
},
"required": [
"image"
]
}
},
"required": [
"manager"
]
},
"csp": {
"description": "CSP to which the chart is deployed.",
"enum": ["Azure", "GCP", "AWS", "QEMU"]
},
"constellationUID": {
"description": "UID for the specific cluster",
"type": "string"
}
},
"required": [
"controllerManager",
"csp",
"constellationUID"
],
"title": "Values",
"type": "object"
}

View File

@ -0,0 +1,40 @@
controllerManager:
kubeRbacProxy:
image:
repository: gcr.io/kubebuilder/kube-rbac-proxy
tag: v0.11.0
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
manager:
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
replicas: 1
kubernetesClusterDomain: cluster.local
managerConfig:
controllerManagerConfigYaml:
health:
healthProbeBindAddress: :8081
leaderElection:
leaderElect: true
resourceName: 38cc1645.edgeless.systems
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
metricsService:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
type: ClusterIP
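These defaults leave csp, constellationUID, and controllerManager.manager.image unset; per the values schema above, they must be supplied at install time. A sketch of an override file providing them (all values are hypothetical):

csp: Azure
constellationUID: "1234567890123456789"  # hypothetical cluster UID
controllerManager:
  manager:
    image: ghcr.io/edgelesssys/constellation/node-operator:v2.3.0  # hypothetical image reference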

View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@ -0,0 +1,5 @@
apiVersion: v2
name: node-maintenance-operator
description: A Helm chart deploying the medik8s node-maintenance-operator
type: application
version: 2.3.0-pre

View File

@ -0,0 +1,104 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: nodemaintenances.nodemaintenance.medik8s.io
annotations:
cert-manager.io/inject-ca-from: kube-system/node-maintenance-operator-serving-cert
controller-gen.kubebuilder.io/version: v0.9.2
labels:
node-maintenance-operator: ""
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
name: node-maintenance-operator-webhook-service
namespace: kube-system
path: /convert
conversionReviewVersions:
- v1
group: nodemaintenance.medik8s.io
names:
kind: NodeMaintenance
listKind: NodeMaintenanceList
plural: nodemaintenances
shortNames:
- nm
singular: nodemaintenance
scope: Cluster
versions:
- name: v1beta1
schema:
openAPIV3Schema:
description: NodeMaintenance is the Schema for the nodemaintenances API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NodeMaintenanceSpec defines the desired state of NodeMaintenance
properties:
nodeName:
description: Node name to apply maintenance on/off
type: string
reason:
description: Reason for maintenance
type: string
required:
- nodeName
type: object
status:
description: NodeMaintenanceStatus defines the observed state of NodeMaintenance
properties:
drainProgress:
description: Percentage completion of draining the node
type: integer
errorOnLeaseCount:
description: Consecutive number of errors upon obtaining a lease
type: integer
evictionPods:
description: EvictionPods is the total number of pods up for eviction
from the start
type: integer
lastError:
description: LastError represents the latest error, if any, in the latest
reconciliation
type: string
lastUpdate:
description: The last time the status has been updated
format: date-time
type: string
pendingPods:
description: PendingPods is a list of pending pods for eviction
items:
type: string
type: array
phase:
description: Phase is the representation of the maintenance progress (Running, Succeeded, Failed)
type: string
totalpods:
description: TotalPods is the total number of all pods on the node from
the start
type: integer
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
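A minimal NodeMaintenance request against this CRD could look like this (node name and reason are placeholders):

apiVersion: nodemaintenance.medik8s.io/v1beta1
kind: NodeMaintenance
metadata:
  name: nodemaintenance-sample
spec:
  nodeName: worker-node-1  # placeholder node to cordon and drain
  reason: "Replacing outdated node image"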

View File

@ -0,0 +1,33 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "chart.labels" -}}
helm.sh/chart: {{ include "chart.chart" . }}
{{ include "chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,99 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-maintenance-operator-controller-manager
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: node-maintenance-operator-controller-manager
namespace: {{ .Release.Namespace }}
labels:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.controllerManager.replicas }}
selector:
matchLabels:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.selectorLabels" . | nindent 8 }}
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=:8080
- --leader-elect
command:
- /manager
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBERNETES_CLUSTER_DOMAIN
value: {{ .Values.kubernetesClusterDomain }}
image: {{ .Values.controllerManager.manager.image }}
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10
}}
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
serviceAccountName: node-maintenance-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert

View File

@ -0,0 +1,57 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: node-maintenance-operator-leader-election-role
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: node-maintenance-operator-leader-election-rolebinding
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'node-maintenance-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,125 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-manager-role
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/finalizers
verbs:
- update
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-manager-rolebinding
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-metrics-reader
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-controller-manager-metrics-service
namespace: {{ .Release.Namespace }}
labels:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
spec:
type: {{ .Values.metricsService.type }}
selector:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.selectorLabels" . | nindent 4 }}
ports:
{{- .Values.metricsService.ports | toYaml | nindent 2 -}}

View File

@ -0,0 +1,38 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-proxy-role
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-proxy-rolebinding
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: '{{ .Release.Namespace }}'

View File

@ -0,0 +1,9 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: node-maintenance-operator-selfsigned-issuer
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
spec:
selfSigned: {}

View File

@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: node-maintenance-operator-serving-cert
namespace: {{ .Release.Namespace }}
labels:
{{- include "chart.labels" . | nindent 4 }}
spec:
dnsNames:
- 'node-maintenance-operator-webhook-service.{{ .Release.Namespace }}.svc'
- 'node-maintenance-operator-webhook-service.{{ .Release.Namespace }}.svc.{{
.Values.kubernetesClusterDomain }}'
issuerRef:
kind: Issuer
name: node-maintenance-operator-selfsigned-issuer
secretName: webhook-server-cert

View File

@ -0,0 +1,31 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: node-maintenance-operator-validating-webhook-configuration
namespace: {{ .Release.Namespace }}
annotations:
cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/node-maintenance-operator-serving-cert
labels:
{{- include "chart.labels" . | nindent 4 }}
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: node-maintenance-operator-webhook-service
namespace: {{ .Release.Namespace }}
path: /validate-nodemaintenance-medik8s-io-v1beta1-nodemaintenance
failurePolicy: Fail
name: vnodemaintenance.kb.io
rules:
- apiGroups:
- nodemaintenance.medik8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- nodemaintenances
sideEffects: None
timeoutSeconds: 15

View File

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-webhook-service
namespace: {{ .Release.Namespace }}
labels:
node-maintenance-operator: ""
{{- include "chart.labels" . | nindent 4 }}
spec:
type: {{ .Values.webhookService.type }}
selector:
control-plane: controller-manager
node-maintenance-operator: ""
{{- include "chart.selectorLabels" . | nindent 4 }}
ports:
{{- .Values.webhookService.ports | toYaml | nindent 2 -}}

View File

@ -0,0 +1,33 @@
{
"$schema": "https://json-schema.org/draft-07/schema#",
"properties": {
"controllerManager": {
"description": "Container image to use for the spawned pods.",
"type": "object",
"properties": {
"manager": {
"type": "object",
"properties": {
"image": {
"description": "Container image to use for the spawned pods.",
"type": "string",
"examples": ["k8s.gcr.io/autoscaling/cluster-autoscaler:v1.23.1"]
}
},
"required": [
"image"
]
}
},
"required": [
"manager"
]
}
},
"required": [
"controllerManager"
],
"title": "Values",
"type": "object"
}
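Helm enforces a chart's values.schema.json automatically on install, upgrade, and lint. For reference, the same check is exposed programmatically through chartutil; a minimal sketch, where the function name validateValues is illustrative and not part of this change:

package helm

import (
	"helm.sh/helm/v3/pkg/chart"
	"helm.sh/helm/v3/pkg/chartutil"
)

// validateValues runs the same schema check Helm performs on install and
// upgrade: it validates the merged values against the chart's
// values.schema.json (and those of its dependencies).
func validateValues(c *chart.Chart, values map[string]any) error {
	return chartutil.ValidateAgainstSchema(c, values)
}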

View File

@ -0,0 +1,24 @@
controllerManager:
manager:
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
replicas: 1
kubernetesClusterDomain: cluster.local
metricsService:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
type: ClusterIP
webhookService:
ports:
- port: 443
protocol: TCP
targetPort: 9443
type: ClusterIP

View File

@ -0,0 +1,6 @@
# Set one of the tags to true to indicate which CSP you are deploying to.
tags:
Azure: false
GCP: false
AWS: false
QEMU: false
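These tags gate the per-CSP subcharts. The CLI loader shown later in this diff (loadOperatorsHelper) enables exactly one of them per provider; a minimal sketch of the resulting values for GCP, where the helper name gcpOperatorTags is illustrative only:

// gcpOperatorTags shows the tag values the loader produces for GCP.
// Sketch only, mirroring loadOperatorsHelper later in this diff.
func gcpOperatorTags() map[string]any {
	return map[string]any{
		"tags": map[string]any{
			"GCP": true,
		},
	}
}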

View File

@ -0,0 +1,3 @@
#!/usr/bin/env bash
helm pull cert-manager --version 1.10.0 --repo https://charts.jetstack.io --untar --untardir charts &&
rm -rf charts/cert-manager/README.md charts/cert-manager-v1.10.0.tgz

View File

@ -29,10 +29,10 @@ import (
"helm.sh/helm/v3/pkg/chartutil"
)
// Run `go generate` to deterministically create the patched Helm deployment for cilium
// Run `go generate` to download (and patch) upstream helm charts.
//go:generate ./generateCilium.sh
// Run `go generate` to load CSI driver charts from the CSI repositories
//go:generate ./update-csi-charts.sh
//go:generate ./generateCertManager.sh
//go:embed all:charts/*
var helmFS embed.FS
@ -77,11 +77,21 @@ func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, masterSe
return nil, fmt.Errorf("loading cilium: %w", err)
}
certManagerRelease, err := i.loadCertManager()
if err != nil {
return nil, fmt.Errorf("loading cilium: %w", err)
}
operatorRelease, err := i.loadOperators(csp)
if err != nil {
return nil, fmt.Errorf("loading operators: %w", err)
}
conServicesRelease, err := i.loadConstellationServices(config, masterSecret, salt)
if err != nil {
return nil, fmt.Errorf("loading constellation-services: %w", err)
}
releases := helm.Releases{Cilium: ciliumRelease, ConstellationServices: conServicesRelease}
releases := helm.Releases{Cilium: ciliumRelease, CertManager: certManagerRelease, Operators: operatorRelease, ConstellationServices: conServicesRelease}
rel, err := json.Marshal(releases)
if err != nil {
@ -91,62 +101,253 @@ func (i *ChartLoader) Load(config *config.Config, conformanceMode bool, masterSe
}
func (i *ChartLoader) loadCilium(csp cloudprovider.Provider, conformanceMode bool) (helm.Release, error) {
chart, err := loadChartsDir(helmFS, "charts/cilium")
chart, values, err := i.loadCiliumHelper(csp, conformanceMode)
if err != nil {
return helm.Release{}, fmt.Errorf("loading cilium chart: %w", err)
return helm.Release{}, err
}
chartRaw, err := i.marshalChart(chart)
if err != nil {
return helm.Release{}, fmt.Errorf("packaging chart: %w", err)
return helm.Release{}, fmt.Errorf("packaging cilium chart: %w", err)
}
var ciliumVals map[string]any
return helm.Release{Chart: chartRaw, Values: values, ReleaseName: "cilium", Wait: false}, nil
}
// loadCiliumHelper is used to separate the marshalling step from the loading step.
// This reduces the time unit tests take to execute.
func (i *ChartLoader) loadCiliumHelper(csp cloudprovider.Provider, conformanceMode bool) (*chart.Chart, map[string]any, error) {
chart, err := loadChartsDir(helmFS, "charts/cilium")
if err != nil {
return nil, nil, fmt.Errorf("loading cilium chart: %w", err)
}
var values map[string]any
switch csp {
case cloudprovider.AWS:
ciliumVals = awsVals
values = awsVals
case cloudprovider.Azure:
ciliumVals = azureVals
values = azureVals
case cloudprovider.GCP:
ciliumVals = gcpVals
values = gcpVals
case cloudprovider.QEMU:
ciliumVals = qemuVals
values = qemuVals
default:
return helm.Release{}, fmt.Errorf("unknown csp: %s", csp)
return nil, nil, fmt.Errorf("unknown csp: %s", csp)
}
if conformanceMode {
ciliumVals["kubeProxyReplacementHealthzBindAddr"] = ""
ciliumVals["kubeProxyReplacement"] = "partial"
ciliumVals["sessionAffinity"] = true
ciliumVals["cni"] = map[string]any{
values["kubeProxyReplacementHealthzBindAddr"] = ""
values["kubeProxyReplacement"] = "partial"
values["sessionAffinity"] = true
values["cni"] = map[string]any{
"chainingMode": "portmap",
}
}
return chart, values, nil
}
return helm.Release{Chart: chartRaw, Values: ciliumVals, ReleaseName: "cilium", Wait: true}, nil
func (i *ChartLoader) loadCertManager() (helm.Release, error) {
chart, values, err := i.loadCertManagerHelper()
if err != nil {
return helm.Release{}, err
}
chartRaw, err := i.marshalChart(chart)
if err != nil {
return helm.Release{}, fmt.Errorf("packaging cert-manager chart: %w", err)
}
return helm.Release{Chart: chartRaw, Values: values, ReleaseName: "cert-manager", Wait: false}, nil
}
// loadCertManagerHelper is used to separate the marshalling step from the loading step.
// This reduces the time unit tests take to execute.
func (i *ChartLoader) loadCertManagerHelper() (*chart.Chart, map[string]any, error) {
chart, err := loadChartsDir(helmFS, "charts/cert-manager")
if err != nil {
return nil, nil, fmt.Errorf("loading cert-manager chart: %w", err)
}
values := map[string]any{
"installCRDs": true,
"prometheus": map[string]any{
"enabled": false,
},
"tolerations": []map[string]any{
{
"key": "node-role.kubernetes.io/control-plane",
"effect": "NoSchedule",
"operator": "Exists",
},
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule",
"operator": "Exists",
},
},
"webhook": map[string]any{
"tolerations": []map[string]any{
{
"key": "node-role.kubernetes.io/control-plane",
"effect": "NoSchedule",
"operator": "Exists",
},
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule",
"operator": "Exists",
},
},
},
"cainjector": map[string]any{
"tolerations": []map[string]any{
{
"key": "node-role.kubernetes.io/control-plane",
"effect": "NoSchedule",
"operator": "Exists",
},
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule",
"operator": "Exists",
},
},
},
"startupapicheck": map[string]any{
"timeout": "5m",
"tolerations": []map[string]any{
{
"key": "node-role.kubernetes.io/control-plane",
"effect": "NoSchedule",
"operator": "Exists",
},
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule",
"operator": "Exists",
},
},
},
}
return chart, values, nil
}
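The same two control-plane tolerations are repeated five times in the values above. A minimal sketch of a helper that would remove that repetition, where the function name controlPlaneTolerations is hypothetical and not part of this change:

// controlPlaneTolerations returns the NoSchedule tolerations shared by
// cert-manager, its webhook, cainjector, and startupapicheck above.
// Sketch only; not part of this diff.
func controlPlaneTolerations() []map[string]any {
	return []map[string]any{
		{
			"key":      "node-role.kubernetes.io/control-plane",
			"effect":   "NoSchedule",
			"operator": "Exists",
		},
		{
			"key":      "node-role.kubernetes.io/master",
			"effect":   "NoSchedule",
			"operator": "Exists",
		},
	}
}

Each component entry could then reference it as "tolerations": controlPlaneTolerations().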
func (i *ChartLoader) loadOperators(csp cloudprovider.Provider) (helm.Release, error) {
chart, values, err := i.loadOperatorsHelper(csp)
if err != nil {
return helm.Release{}, err
}
chartRaw, err := i.marshalChart(chart)
if err != nil {
return helm.Release{}, fmt.Errorf("packaging operators chart: %w", err)
}
return helm.Release{Chart: chartRaw, Values: values, ReleaseName: "con-operators", Wait: false}, nil
}
// loadOperatorsHelper is used to separate the marshalling step from the loading step.
// This reduces the time unit tests take to execute.
func (i *ChartLoader) loadOperatorsHelper(csp cloudprovider.Provider) (*chart.Chart, map[string]any, error) {
chart, err := loadChartsDir(helmFS, "charts/edgeless/operators")
if err != nil {
return nil, nil, fmt.Errorf("loading operators chart: %w", err)
}
values := map[string]any{
"constellation-operator": map[string]any{
"controllerManager": map[string]any{
"manager": map[string]any{
"image": versions.ConstellationOperatorImage,
},
},
},
"node-maintenance-operator": map[string]any{
"controllerManager": map[string]any{
"manager": map[string]any{
"image": versions.NodeMaintenanceOperatorImage,
},
},
},
}
switch csp {
case cloudprovider.Azure:
conOpVals, ok := values["constellation-operator"].(map[string]any)
if !ok {
return nil, nil, errors.New("invalid constellation-operator values")
}
conOpVals["csp"] = "Azure"
values["tags"] = map[string]any{
"Azure": true,
}
case cloudprovider.GCP:
conOpVals, ok := values["constellation-operator"].(map[string]any)
if !ok {
return nil, nil, errors.New("invalid constellation-operator values")
}
conOpVals["csp"] = "GCP"
values["tags"] = map[string]any{
"GCP": true,
}
case cloudprovider.QEMU:
conOpVals, ok := values["constellation-operator"].(map[string]any)
if !ok {
return nil, nil, errors.New("invalid constellation-operator values")
}
conOpVals["csp"] = "QEMU"
values["tags"] = map[string]any{
"QEMU": true,
}
case cloudprovider.AWS:
conOpVals, ok := values["constellation-operator"].(map[string]any)
if !ok {
return nil, nil, errors.New("invalid constellation-operator values")
}
conOpVals["csp"] = "AWS"
values["tags"] = map[string]any{
"AWS": true,
}
}
return chart, values, nil
}
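The four CSP cases above are identical except for the provider string. Assuming csp.String() yields "Azure", "GCP", "QEMU", or "AWS" (the casing used by the tags and by the constellation-services values below), a minimal deduplicated sketch could look like this, with setOperatorCSP as a hypothetical name and using the errors and cloudprovider imports already present in this file:

// setOperatorCSP collapses the per-CSP cases above into one path.
// Sketch only; callers would invoke it for the four supported providers.
func setOperatorCSP(csp cloudprovider.Provider, values map[string]any) error {
	conOpVals, ok := values["constellation-operator"].(map[string]any)
	if !ok {
		return errors.New("invalid constellation-operator values")
	}
	conOpVals["csp"] = csp.String()
	values["tags"] = map[string]any{csp.String(): true}
	return nil
}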
// loadConstellationServices loads the constellation-services chart from the embed.FS,
// marshals it into a Helm-packaged .tgz, and sets the values that are configurable via the CLI.
func (i *ChartLoader) loadConstellationServices(config *config.Config, masterSecret, salt []byte) (helm.Release, error) {
chart, err := loadChartsDir(helmFS, "charts/edgeless/constellation-services")
chart, values, err := i.loadConstellationServicesHelper(config, masterSecret, salt)
if err != nil {
return helm.Release{}, fmt.Errorf("loading constellation-services chart: %w", err)
return helm.Release{}, err
}
chartRaw, err := i.marshalChart(chart)
if err != nil {
return helm.Release{}, fmt.Errorf("packaging chart: %w", err)
return helm.Release{}, fmt.Errorf("packaging constellation-services chart: %w", err)
}
return helm.Release{Chart: chartRaw, Values: values, ReleaseName: "constellation-services", Wait: false}, nil
}
// loadConstellationServicesHelper is used to separate the marshalling step from the loading step.
// This reduces the time unit tests take to execute.
func (i *ChartLoader) loadConstellationServicesHelper(config *config.Config, masterSecret, salt []byte) (*chart.Chart, map[string]any, error) {
chart, err := loadChartsDir(helmFS, "charts/edgeless/constellation-services")
if err != nil {
return nil, nil, fmt.Errorf("loading constellation-services chart: %w", err)
}
enforcedPCRsJSON, err := json.Marshal(config.GetEnforcedPCRs())
if err != nil {
return helm.Release{}, fmt.Errorf("marshaling enforcedPCRs: %w", err)
return nil, nil, fmt.Errorf("marshaling enforcedPCRs: %w", err)
}
csp := config.GetProvider()
vals := map[string]any{
values := map[string]any{
"global": map[string]any{
"kmsPort": constants.KMSPort,
"serviceBasePath": constants.ServiceBasePath,
@ -164,98 +365,97 @@ func (i *ChartLoader) loadConstellationServices(config *config.Config, masterSec
"measurementsFilename": constants.MeasurementsFilename,
},
"join-service": map[string]any{
"csp": csp,
"csp": csp.String(),
"enforcedPCRs": string(enforcedPCRsJSON),
"image": i.joinServiceImage,
},
"ccm": map[string]any{
"csp": csp,
"csp": csp.String(),
},
"autoscaler": map[string]any{
"csp": csp,
"csp": csp.String(),
"image": i.autoscalerImage,
},
}
switch csp {
case cloudprovider.Azure:
joinServiceVals, ok := vals["join-service"].(map[string]any)
joinServiceVals, ok := values["join-service"].(map[string]any)
if !ok {
return helm.Release{}, errors.New("invalid join-service values")
return nil, nil, errors.New("invalid join-service values")
}
joinServiceVals["enforceIdKeyDigest"] = config.EnforcesIDKeyDigest()
ccmVals, ok := vals["ccm"].(map[string]any)
ccmVals, ok := values["ccm"].(map[string]any)
if !ok {
return helm.Release{}, errors.New("invalid ccm values")
return nil, nil, errors.New("invalid ccm values")
}
ccmVals["Azure"] = map[string]any{
"image": i.ccmImage,
}
vals["cnm"] = map[string]any{
values["cnm"] = map[string]any{
"image": i.cnmImage,
}
vals["azure"] = map[string]any{
values["azure"] = map[string]any{
"deployCSIDriver": config.DeployCSIDriver(),
}
vals["azuredisk-csi-driver"] = map[string]any{
values["azuredisk-csi-driver"] = map[string]any{
"node": map[string]any{
"kmsPort": constants.KMSPort,
"kmsNamespace": "", // empty namespace means we use the release namespace
},
}
vals["tags"] = map[string]any{
values["tags"] = map[string]any{
"Azure": true,
}
case cloudprovider.GCP:
ccmVals, ok := vals["ccm"].(map[string]any)
ccmVals, ok := values["ccm"].(map[string]any)
if !ok {
return helm.Release{}, errors.New("invalid ccm values")
return nil, nil, errors.New("invalid ccm values")
}
ccmVals["GCP"] = map[string]any{
"image": i.ccmImage,
}
vals["gcp"] = map[string]any{
values["gcp"] = map[string]any{
"deployCSIDriver": config.DeployCSIDriver(),
}
vals["gcp-compute-persistent-disk-csi-driver"] = map[string]any{
values["gcp-compute-persistent-disk-csi-driver"] = map[string]any{
"csiNode": map[string]any{
"kmsPort": constants.KMSPort,
"kmsNamespace": "", // empty namespace means we use the release namespace
},
}
vals["tags"] = map[string]any{
values["tags"] = map[string]any{
"GCP": true,
}
case cloudprovider.QEMU:
vals["tags"] = map[string]interface{}{
values["tags"] = map[string]interface{}{
"QEMU": true,
}
case cloudprovider.AWS:
ccmVals, ok := vals["ccm"].(map[string]any)
ccmVals, ok := values["ccm"].(map[string]any)
if !ok {
return helm.Release{}, errors.New("invalid ccm values")
return nil, nil, errors.New("invalid ccm values")
}
ccmVals["AWS"] = map[string]any{
"image": i.ccmImage,
}
vals["tags"] = map[string]any{
values["tags"] = map[string]any{
"AWS": true,
}
}
return helm.Release{Chart: chartRaw, Values: vals, ReleaseName: "constellation-services", Wait: true}, nil
return chart, values, nil
}
// marshalChart takes a Chart object, packages it to a temporary file and returns the content of that file.

View File

@ -15,6 +15,7 @@ import (
"path"
"testing"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
"github.com/edgelesssys/constellation/v2/internal/config"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/pkg/errors"
@ -44,8 +45,8 @@ func TestLoad(t *testing.T) {
assert.NotNil(chart.Dependencies())
}
// TestTemplate checks if the rendered constellation-services chart produces the expected yaml files.
func TestTemplate(t *testing.T) {
// TestConstellationServices checks if the rendered constellation-services chart produces the expected yaml files.
func TestConstellationServices(t *testing.T) {
testCases := map[string]struct {
config *config.Config
enforceIDKeyDigest bool
@ -88,14 +89,7 @@ func TestTemplate(t *testing.T) {
require := require.New(t)
chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: tc.ccmImage, cnmImage: tc.cnmImage, autoscalerImage: "autoscalerImage"}
release, err := chartLoader.Load(tc.config, true, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
require.NoError(err)
var helmReleases helm.Releases
err = json.Unmarshal(release, &helmReleases)
require.NoError(err)
reader := bytes.NewReader(helmReleases.ConstellationServices.Chart)
chart, err := loader.LoadArchive(reader)
chart, values, err := chartLoader.loadConstellationServicesHelper(tc.config, []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), []byte("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"))
require.NoError(err)
options := chartutil.ReleaseOptions{
@ -107,14 +101,14 @@ func TestTemplate(t *testing.T) {
}
caps := &chartutil.Capabilities{}
err = tc.valuesModifier(helmReleases.ConstellationServices.Values)
err = tc.valuesModifier(values)
require.NoError(err)
// This step is needed to enable/disable subcharts according to their tags/conditions.
err = chartutil.ProcessDependencies(chart, helmReleases.ConstellationServices.Values)
err = chartutil.ProcessDependencies(chart, values)
require.NoError(err)
valuesToRender, err := chartutil.ToRenderValues(chart, helmReleases.ConstellationServices.Values, options, caps)
valuesToRender, err := chartutil.ToRenderValues(chart, values, options, caps)
require.NoError(err)
result, err := engine.Render(chart, valuesToRender)
@ -135,6 +129,69 @@ func TestTemplate(t *testing.T) {
}
}
// TestOperators checks if the rendered operators chart produces the expected yaml files.
func TestOperators(t *testing.T) {
testCases := map[string]struct {
csp cloudprovider.Provider
}{
"GCP": {
csp: cloudprovider.GCP,
},
"Azure": {
csp: cloudprovider.Azure,
},
"QEMU": {
csp: cloudprovider.QEMU,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
chartLoader := ChartLoader{joinServiceImage: "joinServiceImage", kmsImage: "kmsImage", ccmImage: "ccmImage", cnmImage: "cnmImage", autoscalerImage: "autoscalerImage"}
chart, vals, err := chartLoader.loadOperatorsHelper(tc.csp)
require.NoError(err)
options := chartutil.ReleaseOptions{
Name: "testRelease",
Namespace: "testNamespace",
Revision: 1,
IsInstall: true,
IsUpgrade: false,
}
caps := &chartutil.Capabilities{}
conOpVals, ok := vals["constellation-operator"].(map[string]any)
require.True(ok)
conOpVals["constellationUID"] = "42424242424242"
// This step is needed to enable/disable subcharts according to their tags/conditions.
err = chartutil.ProcessDependencies(chart, vals)
require.NoError(err)
valuesToRender, err := chartutil.ToRenderValues(chart, vals, options, caps)
require.NoError(err)
result, err := engine.Render(chart, valuesToRender)
require.NoError(err)
for k, v := range result {
currentFile := path.Join("testdata", tc.csp.String(), k)
content, err := os.ReadFile(currentFile)
// If a file does not exist, we expect the render for that path to be empty.
if errors.Is(err, fs.ErrNotExist) {
assert.YAMLEq("", v, fmt.Sprintf("current file: %s", currentFile))
continue
}
assert.NoError(err)
assert.YAMLEq(string(content), v, fmt.Sprintf("current file: %s", currentFile))
}
})
}
}
func prepareGCPValues(values map[string]any) error {
joinVals, ok := values["join-service"].(map[string]any)
if !ok {

View File

@ -0,0 +1,131 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: constellation-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=0
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
protocol: TCP
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=127.0.0.1:8080
- --leader-elect
command:
- /manager
env:
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
- name: CONSTEL_CSP
value: Azure
- name: constellation-uid
value: 42424242424242
image: ghcr.io/edgelesssys/constellation/node-operator:v2.3.0-pre.0.20221108173951-34435e439604
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /etc/kubernetes/pki/etcd
name: etcd-certs
- mountPath: /etc/azure
name: azureconfig
readOnly: true
- mountPath: /etc/gce
name: gceconf
readOnly: true
nodeSelector:
node-role.kubernetes.io/control-plane: ""
securityContext:
runAsUser: 0
serviceAccountName: constellation-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
operator: Exists
- effect: NoSchedule
key: node-role.kubernetes.io/master
operator: Exists
volumes:
- hostPath:
path: /etc/kubernetes/pki/etcd
type: Directory
name: etcd-certs
- name: azureconfig
secret:
optional: true
secretName: azureconfig
- configMap:
name: gceconf
optional: true
name: gceconf

View File

@ -0,0 +1,61 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: constellation-operator-leader-election-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: constellation-operator-leader-election-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'constellation-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,23 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: constellation-operator-manager-config
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
data:
controller_manager_config.yaml: |
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
health:
healthProbeBindAddress: ":8081"
kind: ControllerManagerConfig
leaderElection:
leaderElect: true
resourceName: "38cc1645.edgeless.systems"
metrics:
bindAddress: "127.0.0.1:8080"
webhook:
port: 9443

View File

@ -0,0 +1,183 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-manager-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- get
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- delete
- get
- list
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- autoscalingstrategies/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimage
verbs:
- get
- list
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- nodeimages/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- pendingnodes/status
verbs:
- get
- patch
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/finalizers
verbs:
- update
- apiGroups:
- update.edgeless.systems
resources:
- scalinggroups/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-manager-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-metrics-reader
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -0,0 +1,22 @@
apiVersion: v1
kind: Service
metadata:
name: constellation-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@ -0,0 +1,42 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: constellation-operator-proxy-role
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: constellation-operator-proxy-rolebinding
namespace: testNamespace
labels:
helm.sh/chart: constellation-operator-2.3.0-pre
app.kubernetes.io/name: constellation-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'constellation-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'constellation-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,112 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: node-maintenance-operator-controller-manager
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
template:
metadata:
labels:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
annotations:
kubectl.kubernetes.io/default-container: manager
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
containers:
- args:
- --health-probe-bind-address=:8081
- --metrics-bind-address=:8080
- --leader-elect
command:
- /manager
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBERNETES_CLUSTER_DOMAIN
value: cluster.local
image: ghcr.io/edgelesssys/constellation/node-maintenance-operator:v0.13.1-alpha1
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
resources:
limits:
cpu: 200m
memory: 100Mi
requests:
cpu: 100m
memory: 20Mi
securityContext:
allowPrivilegeEscalation: false
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
runAsNonRoot: true
serviceAccountName: node-maintenance-operator-controller-manager
terminationGracePeriodSeconds: 10
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- effect: NoSchedule
key: node-role.kubernetes.io/control-plane
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert

View File

@ -0,0 +1,63 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: node-maintenance-operator-leader-election-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: node-maintenance-operator-leader-election-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: 'node-maintenance-operator-leader-election-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,131 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-manager-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- ""
resources:
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- pods/eviction
verbs:
- create
- apiGroups:
- apps
resources:
- daemonsets
- deployments
- replicasets
- statefulsets
verbs:
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- create
- get
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/finalizers
verbs:
- update
- apiGroups:
- nodemaintenance.medik8s.io
resources:
- nodemaintenances/status
verbs:
- get
- patch
- update
- apiGroups:
- oauth.openshift.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- policy
resources:
- poddisruptionbudgets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-manager-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-manager-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-metrics-reader
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- nonResourceURLs:
- /metrics
verbs:
- get

View File

@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
name: node-maintenance-operator-controller-manager-metrics-service
namespace: testNamespace
labels:
control-plane: controller-manager
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
type: ClusterIP
selector:
control-plane: controller-manager
node-maintenance-operator: ""
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https

View File

@ -0,0 +1,44 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: node-maintenance-operator-proxy-role
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-maintenance-operator-proxy-rolebinding
namespace: testNamespace
labels:
node-maintenance-operator: ""
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: 'node-maintenance-operator-proxy-role'
subjects:
- kind: ServiceAccount
name: 'node-maintenance-operator-controller-manager'
namespace: 'testNamespace'

View File

@ -0,0 +1,12 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: node-maintenance-operator-selfsigned-issuer
namespace: testNamespace
labels:
helm.sh/chart: node-maintenance-operator-2.3.0-pre
app.kubernetes.io/name: node-maintenance-operator
app.kubernetes.io/instance: testRelease
app.kubernetes.io/managed-by: Helm
spec:
selfSigned: {}

Some files were not shown because too many files have changed in this diff.