AB#2635: Deploy Konnectivity via Helm

Otto Bittner 2022-11-23 08:26:09 +01:00
parent 7283eeb798
commit 3e71459898
24 changed files with 484 additions and 230 deletions

View File

@@ -13,9 +13,7 @@ import (
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/certificate"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/versions"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apiserver/pkg/apis/apiserver"
@@ -28,13 +26,6 @@ const (
KonnectivityKeyFilename = "/etc/kubernetes/konnectivity.key"
)
// KonnectivityAgents bundles all necessary agent deployments.
type KonnectivityAgents struct {
DaemonSet appsv1.DaemonSet
ClusterRoleBinding rbacv1.ClusterRoleBinding
ServiceAccount corev1.ServiceAccount
}
// KonnectivityServerStaticPod deployment.
type KonnectivityServerStaticPod struct {
StaticPod corev1.Pod
@@ -45,178 +36,6 @@ type EgressSelectorConfiguration struct {
EgressSelectorConfiguration apiserver.EgressSelectorConfiguration
}
// NewKonnectivityAgents creates a new KonnectivityAgents.
func NewKonnectivityAgents(konnectivityServerAddress string) *KonnectivityAgents {
return &KonnectivityAgents{
DaemonSet: appsv1.DaemonSet{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "DaemonSet",
},
ObjectMeta: metav1.ObjectMeta{
Name: "konnectivity-agent",
Namespace: "kube-system",
Labels: map[string]string{
"k8s-app": "konnectivity-agent",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
Spec: appsv1.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"k8s-app": "konnectivity-agent",
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"k8s-app": "konnectivity-agent",
},
},
Spec: corev1.PodSpec{
PriorityClassName: "system-cluster-critical",
Tolerations: []corev1.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "node-role.kubernetes.io/control-plane",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "CriticalAddonsOnly",
Operator: corev1.TolerationOpExists,
},
{
Key: "node.kubernetes.io/not-ready",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
},
},
Containers: []corev1.Container{
{
Name: "konnectivity-agent",
Image: versions.KonnectivityAgentImage,
Command: []string{
"/proxy-agent",
},
Args: []string{
"--logtostderr=true",
"--proxy-server-host=" + konnectivityServerAddress,
"--ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
"--proxy-server-port=8132",
"--admin-server-port=8133",
"--health-server-port=8134",
"--service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token",
"--agent-identifiers=host=$(HOST_IP)",
// We will be able to avoid constant polling once either of these issues is resolved:
// https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/358
// https://github.com/kubernetes-sigs/apiserver-network-proxy/issues/273
"--sync-forever=true",
// Ensure stable connection to the konnectivity server.
"--keepalive-time=60m",
"--sync-interval=5s",
"--sync-interval-cap=30s",
"--probe-interval=5s",
"--v=3",
},
Env: []corev1.EnvVar{
{
Name: "HOST_IP",
ValueFrom: &corev1.EnvVarSource{
FieldRef: &corev1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.hostIP",
},
},
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "konnectivity-agent-token",
MountPath: "/var/run/secrets/tokens",
ReadOnly: true,
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8134),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 15,
},
},
},
ServiceAccountName: "konnectivity-agent",
Volumes: []corev1.Volume{
{
Name: "konnectivity-agent-token",
VolumeSource: corev1.VolumeSource{
Projected: &corev1.ProjectedVolumeSource{
Sources: []corev1.VolumeProjection{
{
ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
Audience: "system:konnectivity-server",
Path: "konnectivity-agent-token",
},
},
},
},
},
},
},
},
},
},
},
ClusterRoleBinding: rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "system:konnectivity-server",
Labels: map[string]string{
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: "system:auth-delegator",
},
Subjects: []rbacv1.Subject{
{
APIGroup: "rbac.authorization.k8s.io",
Kind: "User",
Name: "system:konnectivity-server",
},
},
},
ServiceAccount: corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: "konnectivity-agent",
Namespace: "kube-system",
Labels: map[string]string{
"kubernetes.io/cluster-service": "true",
"addonmanager.kubernetes.io/mode": "Reconcile",
},
},
},
}
}
// NewKonnectivityServerStaticPod creates a new KonnectivityServerStaticPod.
func NewKonnectivityServerStaticPod() *KonnectivityServerStaticPod {
udsHostPathType := corev1.HostPathDirectoryOrCreate
@@ -363,11 +182,6 @@ func NewEgressSelectorConfiguration() *EgressSelectorConfiguration {
}
}
// Marshal to Kubernetes YAML.
func (v *KonnectivityAgents) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(v)
}
// Marshal to Kubernetes YAML.
func (v *KonnectivityServerStaticPod) Marshal() ([]byte, error) {
return kubernetes.MarshalK8SResources(v)

View File

@@ -1,28 +0,0 @@
/*
Copyright (c) Edgeless Systems GmbH
SPDX-License-Identifier: AGPL-3.0-only
*/
package resources
import (
"testing"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestKonnectivityMarshalUnmarshal(t *testing.T) {
require := require.New(t)
assert := assert.New(t)
kmsDepl := NewKonnectivityAgents("192.168.2.1")
data, err := kmsDepl.Marshal()
require.NoError(err)
var recreated KonnectivityAgents
require.NoError(kubernetes.UnmarshalK8SResources(data, &recreated))
assert.Equal(kmsDepl, &recreated)
}

View File

@@ -12,7 +12,6 @@ import (
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
"github.com/edgelesssys/constellation/v2/internal/deploy/helm"
"github.com/edgelesssys/constellation/v2/internal/kubernetes"
"github.com/edgelesssys/constellation/v2/internal/logger"
"github.com/edgelesssys/constellation/v2/internal/role"
"github.com/edgelesssys/constellation/v2/internal/versions"
@@ -23,7 +22,6 @@ type clusterUtil interface {
InstallComponentsFromCLI(ctx context.Context, kubernetesComponents versions.ComponentVersions) error
InitCluster(ctx context.Context, initConfig []byte, nodeName string, ips []net.IP, controlPlaneEndpoint string, conformanceMode bool, log *logger.Logger) error
JoinCluster(ctx context.Context, joinConfig []byte, peerRole role.Role, controlPlaneEndpoint string, log *logger.Logger) error
SetupKonnectivity(kubectl k8sapi.Client, konnectivityAgentsDaemonSet kubernetes.Marshaler) error
FixCilium(log *logger.Logger)
StartKubelet() error
}

View File

@@ -19,7 +19,6 @@ import (
"time"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi"
"github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/k8sapi/resources"
kubewaiter "github.com/edgelesssys/constellation/v2/bootstrapper/internal/kubernetes/kubeWaiter"
"github.com/edgelesssys/constellation/v2/internal/cloud/azureshared"
"github.com/edgelesssys/constellation/v2/internal/cloud/cloudprovider"
@@ -188,18 +187,7 @@ func (k *KubeWrapper) InitCluster(
} else {
controlPlaneIP = controlPlaneEndpoint
}
if err = k.clusterUtil.SetupKonnectivity(k.client, resources.NewKonnectivityAgents(controlPlaneIP)); err != nil {
return nil, fmt.Errorf("setting up konnectivity: %w", err)
}
loadBalancerIP := controlPlaneEndpoint
if strings.Contains(controlPlaneEndpoint, ":") {
loadBalancerIP, _, err = net.SplitHostPort(controlPlaneEndpoint)
if err != nil {
return nil, fmt.Errorf("splitting host port: %w", err)
}
}
serviceConfig := constellationServicesConfig{k.initialMeasurementsJSON, idKeyDigest, measurementSalt, subnetworkPodCIDR, cloudServiceAccountURI, loadBalancerIP}
serviceConfig := constellationServicesConfig{k.initialMeasurementsJSON, idKeyDigest, measurementSalt, subnetworkPodCIDR, cloudServiceAccountURI, controlPlaneIP}
extraVals, err := k.setupExtraVals(ctx, serviceConfig)
if err != nil {
return nil, fmt.Errorf("setting up extraVals: %w", err)
@@ -399,6 +387,9 @@ func (k *KubeWrapper) setupExtraVals(ctx context.Context, serviceConfig constell
"verification-service": map[string]any{
"loadBalancerIP": serviceConfig.loadBalancerIP,
},
"konnectivity": map[string]any{
"loadBalancerIP": serviceConfig.loadBalancerIP,
},
}
instance, err := k.providerMetadata.Self(ctx)
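
Taken together with the CLI-side ChartLoader change further below, the konnectivity subchart now receives its image from the CLI and its loadBalancerIP from the bootstrapper's extraVals. As a rough sketch (not taken verbatim from the commit), the merged values handed to the subchart would look like this; the image tag is illustrative, the IP is a placeholder, and healthServerPort is the chart's own values.yaml default:

konnectivity:
  # set by the CLI ChartLoader from versions.KonnectivityAgentImage (illustrative tag)
  image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.33
  # set by the bootstrapper from the control-plane endpoint (placeholder IP)
  loadBalancerIP: 10.4.0.1
  # chart default from values.yaml
  healthServerPort: 8134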

View File

@@ -177,7 +177,8 @@ func TestInitCluster(t *testing.T) {
k8sVersion: versions.Default,
},
"kubeadm init fails when setting up konnectivity": {
clusterUtil: stubClusterUtil{setupKonnectivityError: someErr},
clusterUtil: stubClusterUtil{},
helmClient: stubHelmClient{servicesError: someErr},
kubeconfigReader: &stubKubeconfigReader{
kubeconfig: []byte("someKubeconfig"),
},

View File

@@ -42,6 +42,13 @@ dependencies:
- GCP
- AWS
- QEMU
- name: konnectivity
version: 2.3.0-pre
tags:
- Azure
- GCP
- AWS
- QEMU
- name: gcp-guest-agent
version: 2.3.0-pre
tags:

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,5 @@
apiVersion: v2
name: konnectivity
description: A chart to deploy konnectivity for Constellation
type: application
version: 2.3.0-pre

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: system:konnectivity-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:konnectivity-server

View File

@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: konnectivity-agent
  name: konnectivity-agent
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      k8s-app: konnectivity-agent
  template:
    metadata:
      labels:
        k8s-app: konnectivity-agent
    spec:
      containers:
      - args:
        - --logtostderr=true
        - --proxy-server-host={{ .Values.loadBalancerIP }}
        - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - --proxy-server-port=8132
        - --admin-server-port=8133
        - --health-server-port={{ .Values.healthServerPort }}
        - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token
        - --agent-identifiers=host=$(HOST_IP)
        - --sync-forever=true
        - --keepalive-time=60m
        - --sync-interval=5s
        - --sync-interval-cap=30s
        - --probe-interval=5s
        - --v=3
        command:
        - /proxy-agent
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.hostIP
        image: {{ .Values.image }}
        livenessProbe:
          httpGet:
            path: /healthz
            port: {{ .Values.healthServerPort }}
          initialDelaySeconds: 15
          timeoutSeconds: 15
        name: konnectivity-agent
        resources: {}
        volumeMounts:
        - mountPath: /var/run/secrets/tokens
          name: konnectivity-agent-token
          readOnly: true
      priorityClassName: system-cluster-critical
      serviceAccountName: konnectivity-agent
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: konnectivity-agent-token
        projected:
          sources:
          - serviceAccountToken:
              audience: system:konnectivity-server
              path: konnectivity-agent-token
  updateStrategy: {}

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: konnectivity-agent
  namespace: {{ .Release.Namespace }}

View File

@@ -0,0 +1,21 @@
{
  "$schema": "https://json-schema.org/draft-07/schema#",
  "properties": {
    "image": {
      "description": "Container image to use for the spawned pods.",
      "type": "string",
      "examples": ["us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.33@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da"]
    },
    "loadBalancerIP": {
      "description": "IP of the load balancer serving the control plane.",
      "type": "string",
      "examples": ["10.4.0.1"]
    }
  },
  "required": [
    "image",
    "loadBalancerIP"
  ],
  "title": "Values",
  "type": "object"
}
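
For reference, a values file accepted by this schema could look like the following; both entries are copied from the schema's examples above. In the actual deployment flow they are not written by hand but injected at install time, image by the CLI's ChartLoader and loadBalancerIP by the bootstrapper:

image: us.gcr.io/k8s-artifacts-prod/kas-network-proxy/proxy-agent:v0.0.33@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da
loadBalancerIP: 10.4.0.1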

View File

@@ -0,0 +1 @@
healthServerPort: 8134

View File

@@ -46,6 +46,7 @@ type ChartLoader struct {
autoscalerImage string
verificationServiceImage string
gcpGuestAgentImage string
konnectivityImage string
}
// New creates a new ChartLoader.
@@ -69,6 +70,7 @@ func New(csp cloudprovider.Provider, k8sVersion versions.ValidK8sVersion) *Chart
autoscalerImage: versions.VersionConfigs[k8sVersion].ClusterAutoscalerImage,
verificationServiceImage: versions.VerificationImage,
gcpGuestAgentImage: versions.GcpGuestImage,
konnectivityImage: versions.KonnectivityAgentImage,
}
}
@@ -388,6 +390,9 @@ func (i *ChartLoader) loadConstellationServicesHelper(config *config.Config, mas
"gcp-guest-agent": map[string]any{
"image": i.gcpGuestAgentImage,
},
"konnectivity": map[string]any{
"image": i.konnectivityImage,
},
}
switch csp {

View File

@@ -253,6 +253,13 @@ func prepareGCPValues(values map[string]any) error {
return errors.New("missing 'verification-service' key")
}
verificationVals["loadBalancerIP"] = "127.0.0.1"
konnectivityVals, ok := values["konnectivity"].(map[string]any)
if !ok {
return errors.New("missing 'konnectivity' key")
}
konnectivityVals["loadBalancerIP"] = "127.0.0.1"
return nil
}
@@ -289,6 +296,13 @@ func prepareAzureValues(values map[string]any) error {
return errors.New("missing 'verification-service' key")
}
verificationVals["loadBalancerIP"] = "127.0.0.1"
konnectivityVals, ok := values["konnectivity"].(map[string]any)
if !ok {
return errors.New("missing 'konnectivity' key")
}
konnectivityVals["loadBalancerIP"] = "127.0.0.1"
return nil
}
@@ -306,5 +320,11 @@ func prepareQEMUValues(values map[string]any) error {
}
verificationVals["loadBalancerIP"] = "127.0.0.1"
konnectivityVals, ok := values["konnectivity"].(map[string]any)
if !ok {
return errors.New("missing 'konnectivity' key")
}
konnectivityVals["loadBalancerIP"] = "127.0.0.1"
return nil
}

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: system:konnectivity-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:konnectivity-server

View File

@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: konnectivity-agent
  name: konnectivity-agent
  namespace: testNamespace
spec:
  selector:
    matchLabels:
      k8s-app: konnectivity-agent
  template:
    metadata:
      labels:
        k8s-app: konnectivity-agent
    spec:
      containers:
      - args:
        - --logtostderr=true
        - --proxy-server-host=127.0.0.1
        - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - --proxy-server-port=8132
        - --admin-server-port=8133
        - --health-server-port=8134
        - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token
        - --agent-identifiers=host=$(HOST_IP)
        - --sync-forever=true
        - --keepalive-time=60m
        - --sync-interval=5s
        - --sync-interval-cap=30s
        - --probe-interval=5s
        - --v=3
        command:
        - /proxy-agent
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.hostIP
        image:
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8134
          initialDelaySeconds: 15
          timeoutSeconds: 15
        name: konnectivity-agent
        resources: {}
        volumeMounts:
        - mountPath: /var/run/secrets/tokens
          name: konnectivity-agent-token
          readOnly: true
      priorityClassName: system-cluster-critical
      serviceAccountName: konnectivity-agent
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: konnectivity-agent-token
        projected:
          sources:
          - serviceAccountToken:
              audience: system:konnectivity-server
              path: konnectivity-agent-token
  updateStrategy: {}

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: konnectivity-agent
  namespace: testNamespace

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: system:konnectivity-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:konnectivity-server

View File

@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: konnectivity-agent
  name: konnectivity-agent
  namespace: testNamespace
spec:
  selector:
    matchLabels:
      k8s-app: konnectivity-agent
  template:
    metadata:
      labels:
        k8s-app: konnectivity-agent
    spec:
      containers:
      - args:
        - --logtostderr=true
        - --proxy-server-host=127.0.0.1
        - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - --proxy-server-port=8132
        - --admin-server-port=8133
        - --health-server-port=8134
        - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token
        - --agent-identifiers=host=$(HOST_IP)
        - --sync-forever=true
        - --keepalive-time=60m
        - --sync-interval=5s
        - --sync-interval-cap=30s
        - --probe-interval=5s
        - --v=3
        command:
        - /proxy-agent
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.hostIP
        image:
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8134
          initialDelaySeconds: 15
          timeoutSeconds: 15
        name: konnectivity-agent
        resources: {}
        volumeMounts:
        - mountPath: /var/run/secrets/tokens
          name: konnectivity-agent-token
          readOnly: true
      priorityClassName: system-cluster-critical
      serviceAccountName: konnectivity-agent
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: konnectivity-agent-token
        projected:
          sources:
          - serviceAccountToken:
              audience: system:konnectivity-server
              path: konnectivity-agent-token
  updateStrategy: {}

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: konnectivity-agent
  namespace: testNamespace

View File

@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: system:konnectivity-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:konnectivity-server

View File

@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: konnectivity-agent
  name: konnectivity-agent
  namespace: testNamespace
spec:
  selector:
    matchLabels:
      k8s-app: konnectivity-agent
  template:
    metadata:
      labels:
        k8s-app: konnectivity-agent
    spec:
      containers:
      - args:
        - --logtostderr=true
        - --proxy-server-host=127.0.0.1
        - --ca-cert=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - --proxy-server-port=8132
        - --admin-server-port=8133
        - --health-server-port=8134
        - --service-account-token-path=/var/run/secrets/tokens/konnectivity-agent-token
        - --agent-identifiers=host=$(HOST_IP)
        - --sync-forever=true
        - --keepalive-time=60m
        - --sync-interval=5s
        - --sync-interval-cap=30s
        - --probe-interval=5s
        - --v=3
        command:
        - /proxy-agent
        env:
        - name: HOST_IP
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: status.hostIP
        image:
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8134
          initialDelaySeconds: 15
          timeoutSeconds: 15
        name: konnectivity-agent
        resources: {}
        volumeMounts:
        - mountPath: /var/run/secrets/tokens
          name: konnectivity-agent-token
          readOnly: true
      priorityClassName: system-cluster-critical
      serviceAccountName: konnectivity-agent
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
      - key: CriticalAddonsOnly
        operator: Exists
      - effect: NoExecute
        key: node.kubernetes.io/not-ready
        operator: Exists
      volumes:
      - name: konnectivity-agent-token
        projected:
          sources:
          - serviceAccountToken:
              audience: system:konnectivity-server
              path: konnectivity-agent-token
  updateStrategy: {}

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
  name: konnectivity-agent
  namespace: testNamespace