Deploy operator-lifecycle-manager (OLM), node-maintenance-operator (NMO) and constellation-node-operator

Signed-off-by: Malte Poll <mp@edgeless.systems>
Author: Malte Poll, 2022-08-04 16:15:52 +02:00 (committed by Malte Poll)
Parent: 18a89d2881
Commit: 2c7129987a
23 changed files with 8756 additions and 32 deletions

View file

@@ -7,10 +7,13 @@ import (
"github.com/edgelesssys/constellation/bootstrapper/internal/kubernetes/k8sapi/resources"
corev1 "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
@@ -21,8 +24,9 @@ const fieldManager = "constellation-bootstrapper"
// Client implements k8sapi.Client interface and talks to the Kubernetes API.
type Client struct {
clientset kubernetes.Interface
builder *resource.Builder
clientset kubernetes.Interface
apiextensionClient apiextensionsclientv1.ApiextensionsV1Interface
builder *resource.Builder
}
// New creates a new Client, talking to the real k8s API.
@@ -36,13 +40,18 @@ func New(config []byte) (*Client, error) {
return nil, fmt.Errorf("creating k8s client from kubeconfig: %w", err)
}
apiextensionClient, err := apiextensionsclientv1.NewForConfig(clientConfig)
if err != nil {
return nil, fmt.Errorf("creating api extension client from kubeconfig: %w", err)
}
restClientGetter, err := newRESTClientGetter(config)
if err != nil {
return nil, fmt.Errorf("creating k8s RESTClientGetter from kubeconfig: %w", err)
}
builder := resource.NewBuilder(restClientGetter).Unstructured()
return &Client{clientset: clientset, builder: builder}, nil
return &Client{clientset: clientset, apiextensionClient: apiextensionClient, builder: builder}, nil
}
// ApplyOneObject uses server-side apply to send unstructured JSON blobs to the server and let it handle the core logic.
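The body of ApplyOneObject is outside this hunk. For orientation, here is a minimal sketch of what server-side apply of a resource.Info typically looks like with cli-runtime, using only the imports added above; it is an illustration, not necessarily the exact implementation in this file:

// Sketch only — the real method may differ in details.
func applyOneObjectSketch(info *resource.Info, forceConflicts bool) error {
	// Encode the object as JSON and let the API server merge it (server-side apply).
	data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object)
	if err != nil {
		return err
	}
	helper := resource.NewHelper(info.Client, info.Mapping).WithFieldManager(fieldManager)
	obj, err := helper.Patch(info.Namespace, info.Name, types.ApplyPatchType, data, &metav1.PatchOptions{
		Force: &forceConflicts,
	})
	if err != nil {
		return err
	}
	return info.Refresh(obj, true)
}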
@@ -147,3 +156,37 @@ func (c *Client) AddNodeSelectorsToDeployment(ctx context.Context, selectors map
}
return nil
}
// WaitForCRD waits for the given CRD to be established.
func (c *Client) WaitForCRD(ctx context.Context, crd string) error {
watcher, err := c.apiextensionClient.CustomResourceDefinitions().Watch(ctx, metav1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=%s", crd),
})
if err != nil {
return err
}
defer watcher.Stop()
for event := range watcher.ResultChan() {
switch event.Type {
case watch.Added, watch.Modified:
crd := event.Object.(*apiextensionsv1.CustomResourceDefinition)
if crdHasCondition(crd.Status.Conditions, apiextensionsv1.Established) {
return nil
}
case watch.Deleted:
return fmt.Errorf("crd %q deleted", crd)
case watch.Error:
return fmt.Errorf("crd %q error: %v", crd, event.Object)
}
}
return fmt.Errorf("crd %q not established", crd)
}
func crdHasCondition(conditions []apiextensionsv1.CustomResourceDefinitionCondition, conditionType apiextensionsv1.CustomResourceDefinitionConditionType) bool {
for _, condition := range conditions {
if condition.Type == conditionType && condition.Status == apiextensionsv1.ConditionTrue {
return true
}
}
return false
}

View file

@@ -16,12 +16,15 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
k8s "k8s.io/api/core/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apiextensionsclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/cli-runtime/pkg/resource"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
@@ -376,3 +379,104 @@ func TestAddNodeSelectorsToDeployment(t *testing.T) {
})
}
}
func TestWaitForCRD(t *testing.T) {
testCases := map[string]struct {
crd string
events []watch.Event
watchErr error
wantErr bool
}{
"Success": {
crd: "test-crd",
events: []watch.Event{
{
Type: watch.Added,
Object: &apiextensionsv1.CustomResourceDefinition{
Status: apiextensionsv1.CustomResourceDefinitionStatus{
Conditions: []apiextensionsv1.CustomResourceDefinitionCondition{
{
Type: apiextensionsv1.Established,
Status: apiextensionsv1.ConditionTrue,
},
},
},
},
},
},
},
"watch error": {
crd: "test-crd",
watchErr: errors.New("watch error"),
wantErr: true,
},
"crd deleted": {
crd: "test-crd",
events: []watch.Event{{Type: watch.Deleted}},
wantErr: true,
},
"other error": {
crd: "test-crd",
events: []watch.Event{{Type: watch.Error}},
wantErr: true,
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
assert := assert.New(t)
require := require.New(t)
client := Client{
apiextensionClient: &stubCRDWatcher{events: tc.events, watchErr: tc.watchErr},
}
err := client.WaitForCRD(context.Background(), tc.crd)
if tc.wantErr {
assert.Error(err)
return
}
require.NoError(err)
})
}
}
type stubCRDWatcher struct {
events []watch.Event
watchErr error
apiextensionsclientv1.ApiextensionsV1Interface
}
func (w *stubCRDWatcher) CustomResourceDefinitions() apiextensionsclientv1.CustomResourceDefinitionInterface {
return &stubCustomResourceDefinitions{
events: w.events,
watchErr: w.watchErr,
}
}
type stubCustomResourceDefinitions struct {
events []watch.Event
watchErr error
apiextensionsclientv1.CustomResourceDefinitionInterface
}
func (c *stubCustomResourceDefinitions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
eventChan := make(chan watch.Event, len(c.events))
for _, event := range c.events {
eventChan <- event
}
return &stubCRDWatch{events: eventChan}, c.watchErr
}
type stubCRDWatch struct {
events chan watch.Event
}
func (w *stubCRDWatch) Stop() {
close(w.events)
}
func (w *stubCRDWatch) ResultChan() <-chan watch.Event {
return w.events
}

View file

@@ -22,6 +22,8 @@ type Client interface {
CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error
AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error
// WaitForCRD waits for the given CRD to be established.
WaitForCRD(ctx context.Context, crd string) error
}
// clientGenerator can generate new clients from a kubeconfig.
@@ -111,3 +113,19 @@ func (k *Kubectl) AddNodeSelectorsToDeployment(ctx context.Context, selectors ma
return nil
}
// WaitForCRDs waits for a list of CRDs to be established.
func (k *Kubectl) WaitForCRDs(ctx context.Context, crds []string) error {
client, err := k.clientGenerator.NewClient(k.kubeconfig)
if err != nil {
return err
}
for _, crd := range crds {
err = client.WaitForCRD(ctx, crd)
if err != nil {
return err
}
}
return nil
}

View file

@@ -23,6 +23,7 @@ type stubClient struct {
createConfigMapErr error
addTolerationsToDeploymentErr error
addNodeSelectorToDeploymentErr error
waitForCRDErr error
}
func (s *stubClient) ApplyOneObject(info *resource.Info, forceConflicts bool) error {
@@ -53,16 +54,22 @@ type stubClientGenerator struct {
createConfigMapErr error
addTolerationsToDeploymentErr error
addNodeSelectorToDeploymentErr error
waitForCRDErr error
}
func (s *stubClient) WaitForCRD(ctx context.Context, crd string) error {
return s.waitForCRDErr
}
func (s *stubClientGenerator) NewClient(kubeconfig []byte) (Client, error) {
return &stubClient{
s.applyOneObjectErr,
s.getObjectsInfos,
s.getObjectsErr,
s.createConfigMapErr,
s.addTolerationsToDeploymentErr,
s.addNodeSelectorToDeploymentErr,
applyOneObjectErr: s.applyOneObjectErr,
getObjectsInfos: s.getObjectsInfos,
getObjectsErr: s.getObjectsErr,
createConfigMapErr: s.createConfigMapErr,
addTolerationsToDeploymentErr: s.addTolerationsToDeploymentErr,
addNodeSelectorToDeploymentErr: s.addNodeSelectorToDeploymentErr,
waitForCRDErr: s.waitForCRDErr,
}, s.newClientErr
}

View file

@@ -10,6 +10,8 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const accessManagerNamespace = "kube-system"
// accessManagerDeployment holds the configuration for the SSH user creation pods. User/Key definitions are stored in the ConfigMap, and the manager is deployed on each node by the DaemonSet.
type accessManagerDeployment struct {
ConfigMap k8s.ConfigMap
@@ -35,7 +37,7 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
},
AutomountServiceAccountToken: proto.Bool(true),
},
@@ -46,7 +48,7 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
},
ObjectMeta: v1.ObjectMeta{
Name: "ssh-users",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
},
Data: sshUsers,
},
@@ -57,7 +59,7 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
},
ObjectMeta: v1.ObjectMeta{
Name: "constellation-access-manager",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
Labels: map[string]string{
"app.kubernetes.io/instance": "constellation",
"app.kubernetes.io/name": "constellation-access-manager",
@@ -148,7 +150,7 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
},
Rules: []rbac.PolicyRule{
{
@@ -177,7 +179,7 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
"app.kubernetes.io/managed-by": "Constellation",
},
Name: "constellation-access-manager",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
},
RoleRef: rbac.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
@@ -188,11 +190,11 @@ func NewAccessManagerDeployment(sshUsers map[string]string) *accessManagerDeploy
{
Kind: "ServiceAccount",
Name: "constellation-access-manager",
Namespace: "kube-system",
Namespace: accessManagerNamespace,
},
},
},
ImagePullSecret: NewImagePullSecret(),
ImagePullSecret: NewImagePullSecret(accessManagerNamespace),
}
}
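Hypothetical usage of the constructor above: the map keys are user names and the values SSH public keys, which end up in the "ssh-users" ConfigMap that the per-node DaemonSet consumes. The Marshal call assumes this type exposes the same MarshalK8SResources-based helper as the other resources in this package, which is not shown in this diff:

func exampleAccessManagerManifests() ([]byte, error) {
	users := map[string]string{
		// Illustrative user and key only.
		"myuser": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAA myuser@example.com",
	}
	return NewAccessManagerDeployment(users).Marshal() // Marshal is assumed, mirroring the other deployments.
}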

View file

@@ -10,7 +10,7 @@ import (
)
// NewImagePullSecret creates a new k8s.Secret from the config for authenticating when pulling images.
func NewImagePullSecret() k8s.Secret {
func NewImagePullSecret(namespace string) k8s.Secret {
base64EncodedSecret := base64.StdEncoding.EncodeToString(
[]byte(fmt.Sprintf("%s:%s", secrets.PullSecretUser, secrets.PullSecretToken)),
)
@@ -24,7 +24,7 @@ func NewImagePullSecret() k8s.Secret {
},
ObjectMeta: meta.ObjectMeta{
Name: secrets.PullSecretName,
Namespace: "kube-system",
Namespace: namespace,
},
StringData: map[string]string{".dockerconfigjson": pullSecretDockerCfgJSON},
Type: "kubernetes.io/dockerconfigjson",

View file

@@ -7,7 +7,8 @@ import (
)
func TestImagePullSecret(t *testing.T) {
imgPullSec := NewImagePullSecret()
imgPullSec := NewImagePullSecret("namespace")
_, err := imgPullSec.Marshal()
assert.NoError(t, err)
assert.Equal(t, "namespace", imgPullSec.Namespace)
}

View file

@@ -13,6 +13,8 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
const kmsNamespace = "kube-system"
type kmsDeployment struct {
ServiceAccount k8s.ServiceAccount
Service k8s.Service
@@ -43,7 +45,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
Namespace: kmsNamespace,
},
},
Service: k8s.Service{
@@ -53,7 +55,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
},
ObjectMeta: meta.ObjectMeta{
Name: "kms",
Namespace: "kube-system",
Namespace: kmsNamespace,
},
Spec: k8s.ServiceSpec{
Type: k8s.ServiceTypeClusterIP,
@@ -106,7 +108,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
{
Kind: "ServiceAccount",
Name: "kms",
Namespace: "kube-system",
Namespace: kmsNamespace,
},
},
},
@@ -120,7 +122,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
"k8s-app": "kms",
},
Name: "kms",
Namespace: "kube-system",
Namespace: kmsNamespace,
},
Spec: apps.DeploymentSpec{
Selector: &meta.LabelSelector{
@@ -239,7 +241,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
},
ObjectMeta: meta.ObjectMeta{
Name: constants.ConstellationMasterSecretStoreName,
Namespace: "kube-system",
Namespace: kmsNamespace,
},
Data: map[string][]byte{
constants.ConstellationMasterSecretKey: config.MasterSecret,
@@ -247,7 +249,7 @@ func NewKMSDeployment(csp string, config KMSConfig) *kmsDeployment {
},
Type: "Opaque",
},
ImagePullSecret: NewImagePullSecret(),
ImagePullSecret: NewImagePullSecret(kmsNamespace),
}
}

View file

@@ -0,0 +1,78 @@
package resources
import (
"time"
"github.com/edgelesssys/constellation/internal/versions"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
nodeMaintenanceOperatorNamespace = "kube-system"
nodeMaintenanceOperatorCatalogNamespace = "olm"
)
type nodeMaintenanceOperatorDeployment struct {
CatalogSource operatorsv1alpha1.CatalogSource
OperatorGroup operatorsv1.OperatorGroup
Subscription operatorsv1alpha1.Subscription
}
// NewNodeMaintenanceOperatorDeployment creates a new node maintenance operator (NMO) deployment.
// See https://github.com/medik8s/node-maintenance-operator for more information.
func NewNodeMaintenanceOperatorDeployment() *nodeMaintenanceOperatorDeployment {
return &nodeMaintenanceOperatorDeployment{
CatalogSource: operatorsv1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
ObjectMeta: metav1.ObjectMeta{
Name: "node-maintenance-operator-catalog",
Namespace: nodeMaintenanceOperatorCatalogNamespace,
},
Spec: operatorsv1alpha1.CatalogSourceSpec{
SourceType: "grpc",
Image: versions.NodeMaintenanceOperatorCatalogImage + ":" + versions.NodeMaintenanceOperatorVersion,
DisplayName: "Node Maintenance Operator",
Publisher: "Medik8s Team",
UpdateStrategy: &operatorsv1alpha1.UpdateStrategy{
RegistryPoll: &operatorsv1alpha1.RegistryPoll{
RawInterval: "1m0s",
Interval: &metav1.Duration{
Duration: time.Minute,
},
},
},
},
},
OperatorGroup: operatorsv1.OperatorGroup{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1", Kind: "OperatorGroup"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-og",
Namespace: nodeMaintenanceOperatorNamespace,
},
Spec: operatorsv1.OperatorGroupSpec{
UpgradeStrategy: operatorsv1.UpgradeStrategyDefault,
},
},
Subscription: operatorsv1alpha1.Subscription{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "Subscription"},
ObjectMeta: metav1.ObjectMeta{
Name: "node-maintenance-operator-sub",
Namespace: nodeMaintenanceOperatorNamespace,
},
Spec: &operatorsv1alpha1.SubscriptionSpec{
Channel: "stable",
Package: "node-maintenance-operator",
CatalogSource: "node-maintenance-operator-catalog",
CatalogSourceNamespace: "olm",
InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic,
StartingCSV: "node-maintenance-operator." + versions.NodeMaintenanceOperatorVersion,
},
},
}
}
func (c *nodeMaintenanceOperatorDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}

View file

@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNodeMaintenanceOperatorMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
nmoDepl := NewNodeMaintenanceOperatorDeployment()
data, err := nmoDepl.Marshal()
require.NoError(err)
var recreated nodeMaintenanceOperatorDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(nmoDepl, &recreated)
}

View file

@@ -0,0 +1,98 @@
package resources
import (
_ "embed"
"time"
"github.com/edgelesssys/constellation/internal/secrets"
"github.com/edgelesssys/constellation/internal/versions"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
nodeOperatorNamespace = "kube-system"
nodeOperatorCatalogNamespace = "olm"
)
// NodeOperatorCRDNames are the names of the custom resource definitions that are used by the node operator.
var NodeOperatorCRDNames = []string{
"autoscalingstrategies.update.edgeless.systems",
"nodeimages.update.edgeless.systems",
"pendingnodes.update.edgeless.systems",
"scalinggroups.update.edgeless.systems",
}
type nodeOperatorDeployment struct {
CatalogSource operatorsv1alpha1.CatalogSource
OperatorGroup operatorsv1.OperatorGroup
Subscription operatorsv1alpha1.Subscription
CatalogPullSecret corev1.Secret
ImagePullSecret corev1.Secret
}
// NewNodeOperatorDeployment creates a new constellation node operator deployment.
// See /operators/constellation-node-operator for more information.
func NewNodeOperatorDeployment(cloudProvider string, uid string) *nodeOperatorDeployment {
return &nodeOperatorDeployment{
CatalogSource: operatorsv1alpha1.CatalogSource{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "CatalogSource"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-node-operator-catalog",
Namespace: nodeOperatorCatalogNamespace,
},
Spec: operatorsv1alpha1.CatalogSourceSpec{
SourceType: "grpc",
Secrets: []string{secrets.PullSecretName},
Image: versions.NodeOperatorCatalogImage + ":" + versions.NodeOperatorVersion,
DisplayName: "Constellation Node Operator",
Publisher: "Edgeless Systems",
UpdateStrategy: &operatorsv1alpha1.UpdateStrategy{
RegistryPoll: &operatorsv1alpha1.RegistryPoll{
RawInterval: "1m0s",
Interval: &metav1.Duration{Duration: 1 * time.Minute},
},
},
},
},
OperatorGroup: operatorsv1.OperatorGroup{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1", Kind: "OperatorGroup"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-og",
Namespace: nodeOperatorNamespace,
},
Spec: operatorsv1.OperatorGroupSpec{
UpgradeStrategy: operatorsv1.UpgradeStrategyDefault,
},
},
Subscription: operatorsv1alpha1.Subscription{
TypeMeta: metav1.TypeMeta{APIVersion: "operators.coreos.com/v1alpha1", Kind: "Subscription"},
ObjectMeta: metav1.ObjectMeta{
Name: "constellation-node-operator-sub",
Namespace: nodeOperatorNamespace,
},
Spec: &operatorsv1alpha1.SubscriptionSpec{
Channel: "alpha",
Package: "node-operator",
CatalogSource: "constellation-node-operator-catalog",
CatalogSourceNamespace: "olm",
InstallPlanApproval: operatorsv1alpha1.ApprovalAutomatic,
StartingCSV: "node-operator." + versions.NodeOperatorVersion,
Config: &operatorsv1alpha1.SubscriptionConfig{
Env: []corev1.EnvVar{
{Name: "CONSTEL_CSP", Value: cloudProvider},
{Name: "constellation-uid", Value: uid},
},
},
},
},
CatalogPullSecret: NewImagePullSecret(nodeOperatorCatalogNamespace),
ImagePullSecret: NewImagePullSecret(nodeOperatorNamespace),
}
}
func (c *nodeOperatorDeployment) Marshal() ([]byte, error) {
return MarshalK8SResources(c)
}

View file

@@ -0,0 +1,21 @@
package resources
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestNodeOperatorMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
nmoDepl := NewNodeOperatorDeployment("csp", "uid")
data, err := nmoDepl.Marshal()
require.NoError(err)
var recreated nodeOperatorDeployment
require.NoError(UnmarshalK8SResources(data, &recreated))
assert.Equal(nmoDepl, &recreated)
}

View file

@@ -0,0 +1,31 @@
package resources
import "github.com/edgelesssys/constellation/internal/crds"
// OLMCRDNames are the names of the custom resource definitions that are used by the olm operator.
var OLMCRDNames = []string{
"catalogsources.operators.coreos.com",
"clusterserviceversions.operators.coreos.com",
"installplans.operators.coreos.com",
"olmconfigs.operators.coreos.com",
"operatorconditions.operators.coreos.com",
"operatorgroups.operators.coreos.com",
"operators.operators.coreos.com",
"subscriptions.operators.coreos.com",
}
// OperatorLifecycleManagerCRDs contains custom resource definitions used by the olm operator.
type OperatorLifecycleManagerCRDs struct{}
// Marshal returns the already marshalled CRDs.
func (m *OperatorLifecycleManagerCRDs) Marshal() ([]byte, error) {
return crds.OLMCRDs, nil
}
// OperatorLifecycleManager is the deployment of the olm operator.
type OperatorLifecycleManager struct{}
// Marshal returns the already marshalled deployment yaml.
func (m *OperatorLifecycleManager) Marshal() ([]byte, error) {
return crds.OLM, nil
}
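The internal/crds package referenced here is not included in this commit excerpt. A plausible minimal sketch of it, assuming the pre-rendered OLM manifests are embedded with go:embed (the file names are assumptions; the exported variable names match the references above):

package crds

import _ "embed"

// Pre-rendered OLM manifests, embedded at build time.
// File names below are assumptions.

//go:embed olm-crds.yaml
var OLMCRDs []byte

//go:embed olm.yaml
var OLM []byte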

View file

@@ -36,6 +36,8 @@ const (
kubeConfig = "/etc/kubernetes/admin.conf"
// kubeletStartTimeout is the maximum time given to the kubelet service to (re)start.
kubeletStartTimeout = 10 * time.Minute
// crdTimeout is the maximum time given to the CRDs to be created.
crdTimeout = 15 * time.Second
)
var providerIDRegex = regexp.MustCompile(`^azure:///subscriptions/([^/]+)/resourceGroups/([^/]+)/providers/Microsoft.Compute/virtualMachineScaleSets/([^/]+)/virtualMachines/([^/]+)$`)
@@ -47,6 +49,7 @@ type Client interface {
CreateConfigMap(ctx context.Context, configMap corev1.ConfigMap) error
AddTolerationsToDeployment(ctx context.Context, tolerations []corev1.Toleration, name string, namespace string) error
AddNodeSelectorsToDeployment(ctx context.Context, selectors map[string]string, name string, namespace string) error
WaitForCRDs(ctx context.Context, crds []string) error
}
type installer interface {
@@ -366,6 +369,26 @@ func (k *KubernetesUtil) SetupVerificationService(kubectl Client, verificationSe
return kubectl.Apply(verificationServiceConfiguration, true)
}
// SetupOperatorLifecycleManager deploys the operator lifecycle manager (OLM): it applies the OLM CRDs, waits for them to be established, and then applies the OLM configuration.
func (k *KubernetesUtil) SetupOperatorLifecycleManager(ctx context.Context, kubectl Client, olmCRDs, olmConfiguration resources.Marshaler, crdNames []string) error {
if err := kubectl.Apply(olmCRDs, true); err != nil {
return fmt.Errorf("applying OLM CRDs: %w", err)
}
crdReadyTimeout, cancel := context.WithTimeout(ctx, crdTimeout)
defer cancel()
if err := kubectl.WaitForCRDs(crdReadyTimeout, crdNames); err != nil {
return fmt.Errorf("waiting for OLM CRDs: %w", err)
}
return kubectl.Apply(olmConfiguration, true)
}
// SetupNodeMaintenanceOperator deploys the node maintenance operator (NMO) via OLM.
func (k *KubernetesUtil) SetupNodeMaintenanceOperator(kubectl Client, nodeMaintenanceOperatorConfiguration resources.Marshaler) error {
return kubectl.Apply(nodeMaintenanceOperatorConfiguration, true)
}
// SetupNodeOperator deploys the constellation node operator via OLM.
func (k *KubernetesUtil) SetupNodeOperator(ctx context.Context, kubectl Client, nodeOperatorConfiguration resources.Marshaler) error {
return kubectl.Apply(nodeOperatorConfiguration, true)
}
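The call site that ties these helpers together is not part of this excerpt. A hedged sketch of the assumed order, written as if it lived in this package (the function name and error messages are illustrative; the signatures and resource constructors come from the hunks above):

// Illustrative only: the actual call site lives in the bootstrapper's
// Kubernetes setup code, which is not included in this diff.
func deployOperatorsSketch(ctx context.Context, k *KubernetesUtil, kubectl Client, csp, uid string) error {
	// OLM first: its CRDs must be established before the catalog sources,
	// operator groups, and subscriptions of the other operators can be applied.
	if err := k.SetupOperatorLifecycleManager(ctx, kubectl, &resources.OperatorLifecycleManagerCRDs{}, &resources.OperatorLifecycleManager{}, resources.OLMCRDNames); err != nil {
		return fmt.Errorf("setting up OLM: %w", err)
	}
	if err := k.SetupNodeMaintenanceOperator(kubectl, resources.NewNodeMaintenanceOperatorDeployment()); err != nil {
		return fmt.Errorf("setting up node maintenance operator: %w", err)
	}
	return k.SetupNodeOperator(ctx, kubectl, resources.NewNodeOperatorDeployment(csp, uid))
}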
// JoinCluster joins existing Kubernetes cluster using kubeadm join.
func (k *KubernetesUtil) JoinCluster(ctx context.Context, joinConfig []byte, log *logger.Logger) error {
// TODO: audit policy should be user input