mirror of
https://github.com/edgelesssys/constellation.git
synced 2024-12-15 10:54:29 -05:00
968cdc1a38
* cli: move internal packages Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * cli: fix buildfiles Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * bazel: fix exclude dir Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> * cli: move back libraries that will not be used by TF provider Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com> --------- Signed-off-by: Moritz Sanft <58110325+msanft@users.noreply.github.com>
3395 lines
118 KiB
Cheetah
# upgradeCompatibility helps users upgrading to ensure that the configMap for
# Cilium will not change critical values to ensure continued operation
# This flag is not required for new installations.
# For example: 1.7, 1.8, 1.9
# upgradeCompatibility: '1.8'

debug:
  # -- Enable debug logging
  enabled: false
  # -- Configure verbosity levels for debug logging
  # This option is used to enable debug messages for operations related to such
  # sub-system such as (e.g. kvstore, envoy, datapath or policy), and flow is
  # for enabling debug messages emitted per request, message and connection.
  # Multiple values can be set via a space-separated string (e.g. "datapath envoy").
  #
  # Applicable values:
  # - flow
  # - kvstore
  # - envoy
  # - datapath
  # - policy
  verbose: ~

rbac:
  # -- Enable creation of Resource-Based Access Control configuration.
  create: true

# -- Configure image pull secrets for pulling container images
imagePullSecrets:
# - name: "image-pull-secret"

# -- (string) Kubernetes config path
# @default -- `"~/.kube/config"`
kubeConfigPath: ""
# -- (string) Kubernetes service host
k8sServiceHost: ""
# -- (string) Kubernetes service port
k8sServicePort: ""

# -- Configure the client side rate limit for the agent and operator
|
|
#
|
|
# If the amount of requests to the Kubernetes API server exceeds the configured
|
|
# rate limit, the agent and operator will start to throttle requests by delaying
|
|
# them until there is budget or the request times out.
|
|
k8sClientRateLimit:
|
|
# -- The sustained request rate in requests per second.
|
|
qps: 5
|
|
# -- The burst request rate in requests per second.
|
|
# The rate limiter will allow short bursts with a higher rate.
|
|
burst: 10
|
|
|
|
cluster:
  # -- Name of the cluster. Only required for Cluster Mesh and mutual authentication with SPIRE.
  name: default
  # -- (int) Unique ID of the cluster. Must be unique across all connected
  # clusters and in the range of 1 to 255. Only required for Cluster Mesh,
  # may be 0 if Cluster Mesh is not used.
  id: 0

# -- Define serviceAccount names for components.
# @default -- Component's fully qualified name.
serviceAccounts:
  cilium:
    create: true
    name: cilium
    automount: true
    annotations: {}
  nodeinit:
    create: true
    # -- Enabled is temporary until https://github.com/cilium/cilium-cli/issues/1396 is implemented.
    # Cilium CLI doesn't create the SAs for node-init, thus the workaround. Helm is not affected by
    # this issue. Name and automount can be configured, if enabled is set to true.
    # Otherwise, they are ignored. Enabled can be removed once the issue is fixed.
    # Cilium-nodeinit DS must also be fixed.
    enabled: false
    name: cilium-nodeinit
    automount: true
    annotations: {}
  envoy:
    create: true
    name: cilium-envoy
    automount: true
    annotations: {}
  etcd:
    create: true
    name: cilium-etcd-operator
    automount: true
    annotations: {}
  operator:
    create: true
    name: cilium-operator
    automount: true
    annotations: {}
  preflight:
    create: true
    name: cilium-pre-flight
    automount: true
    annotations: {}
  relay:
    create: true
    name: hubble-relay
    automount: false
    annotations: {}
  ui:
    create: true
    name: hubble-ui
    automount: true
    annotations: {}
  clustermeshApiserver:
    create: true
    name: clustermesh-apiserver
    automount: true
    annotations: {}
  # -- Clustermeshcertgen is used if clustermesh.apiserver.tls.auto.method=cronJob
  clustermeshcertgen:
    create: true
    name: clustermesh-apiserver-generate-certs
    automount: true
    annotations: {}
  # -- Hubblecertgen is used if hubble.tls.auto.method=cronJob
  hubblecertgen:
    create: true
    name: hubble-generate-certs
    automount: true
    annotations: {}

# -- Configure termination grace period for cilium-agent DaemonSet.
terminationGracePeriodSeconds: 1

# -- Install the cilium agent resources.
agent: true

# -- Agent container name.
name: cilium

# -- Roll out cilium agent pods automatically when configmap is updated.
rollOutCiliumPods: false

# -- Agent container image.
image:
  override: ~
  repository: "${CILIUM_REPO}"
  tag: "${CILIUM_VERSION}"
  pullPolicy: "${PULL_POLICY}"
  # cilium-digest
  # NOTE(review): quoted so an empty substitution yields "" (string) instead of null.
  digest: "${CILIUM_DIGEST}"
  useDigest: ${USE_DIGESTS}

# -- Affinity for cilium-agent.
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
        labelSelector:
          matchLabels:
            k8s-app: cilium

# -- Node selector for cilium-agent.
nodeSelector:
  kubernetes.io/os: linux

# -- Node tolerations for agent scheduling to nodes with taints
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations:
  - operator: Exists
    # - key: "key"
    #   operator: "Equal|Exists"
    #   value: "value"
    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

# -- The priority class to use for cilium-agent.
priorityClassName: ""

# -- DNS policy for Cilium agent pods.
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ""

# -- Additional containers added to the cilium DaemonSet.
extraContainers: []

# -- Additional agent container arguments.
extraArgs: []

# -- Additional agent container environment variables.
extraEnv: []

# -- Additional agent hostPath mounts.
extraHostPathMounts: []
# - name: host-mnt-data
#   mountPath: /host/mnt/data
#   hostPath: /mnt/data
#   hostPathType: Directory
#   readOnly: true
#   mountPropagation: HostToContainer

# -- Additional agent volumes.
extraVolumes: []

# -- Additional agent volumeMounts.
extraVolumeMounts: []

# -- extraConfig allows you to specify additional configuration parameters to be
# included in the cilium-config configmap.
extraConfig: {}
#  my-config-a: "1234"
#  my-config-b: |-
#    test 1
#    test 2
#    test 3

# -- Annotations to be added to all top-level cilium-agent objects (resources under templates/cilium-agent)
annotations: {}

# -- Security Context for cilium-agent pods.
podSecurityContext: {}

# -- Annotations to be added to agent pods
podAnnotations: {}

# -- Labels to be added to agent pods
podLabels: {}

# -- Agent resource limits & requests
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources: {}
#   limits:
#     cpu: 4000m
#     memory: 4Gi
#   requests:
#     cpu: 100m
#     memory: 512Mi

securityContext:
  # -- User to run the pod with
  # runAsUser: 0
  # -- Run the pod with elevated privileges
  privileged: false
  # -- SELinux options for the `cilium-agent` and init containers
  seLinuxOptions:
    level: 's0'
    # Running with spc_t since we have removed the privileged mode.
    # Users can change it to a different type as long as they have the
    # type available on the system.
    type: 'spc_t'
  capabilities:
    # -- Capabilities for the `cilium-agent` container
    ciliumAgent:
      # Use to set socket permission
      - CHOWN
      # Used to terminate envoy child process
      - KILL
      # Used since cilium modifies routing tables, etc...
      - NET_ADMIN
      # Used since cilium creates raw sockets, etc...
      - NET_RAW
      # Used since cilium monitor uses mmap
      - IPC_LOCK
      # Used in iptables. Consider removing once we are iptables-free
      - SYS_MODULE
      # We need it for now but might not need it for >= 5.11 specially
      # for the 'SYS_RESOURCE'.
      # In >= 5.8 there's already BPF and PERMON capabilities
      - SYS_ADMIN
      # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
      - SYS_RESOURCE
      # Both PERFMON and BPF requires kernel 5.8, container runtime
      # cri-o >= v1.22.0 or containerd >= v1.5.0.
      # If available, SYS_ADMIN can be removed.
      # - PERFMON
      # - BPF
      # Allow discretionary access control (e.g. required for package installation)
      - DAC_OVERRIDE
      # Allow to set Access Control Lists (ACLs) on arbitrary files (e.g. required for package installation)
      - FOWNER
      # Allow to execute program that changes GID (e.g. required for package installation)
      - SETGID
      # Allow to execute program that changes UID (e.g. required for package installation)
      - SETUID
    # -- Capabilities for the `mount-cgroup` init container
    mountCgroup:
      # Only used for 'mount' cgroup
      - SYS_ADMIN
      # Used for nsenter
      - SYS_CHROOT
      - SYS_PTRACE
    # -- capabilities for the `apply-sysctl-overwrites` init container
    applySysctlOverwrites:
      # Required in order to access host's /etc/sysctl.d dir
      - SYS_ADMIN
      # Used for nsenter
      - SYS_CHROOT
      - SYS_PTRACE
    # -- Capabilities for the `clean-cilium-state` init container
    cleanCiliumState:
      # Most of the capabilities here are the same ones used in the
      # cilium-agent's container because this container can be used to
      # uninstall all Cilium resources, and therefore it is likely that
      # will need the same capabilities.
      # Used since cilium modifies routing tables, etc...
      - NET_ADMIN
      # Used in iptables. Consider removing once we are iptables-free
      - SYS_MODULE
      # We need it for now but might not need it for >= 5.11 specially
      # for the 'SYS_RESOURCE'.
      # In >= 5.8 there's already BPF and PERMON capabilities
      - SYS_ADMIN
      # Could be an alternative for the SYS_ADMIN for the RLIMIT_NPROC
      - SYS_RESOURCE
      # Both PERFMON and BPF requires kernel 5.8, container runtime
      # cri-o >= v1.22.0 or containerd >= v1.5.0.
      # If available, SYS_ADMIN can be removed.
      # - PERFMON
      # - BPF

# -- Cilium agent update strategy
updateStrategy:
  type: RollingUpdate
  rollingUpdate:
    maxUnavailable: 2

# Configuration Values for cilium-agent

aksbyocni:
  # -- Enable AKS BYOCNI integration.
  # Note that this is incompatible with AKS clusters not created in BYOCNI mode:
  # use Azure integration (`azure.enabled`) instead.
  enabled: false

# -- Enable installation of PodCIDR routes between worker
# nodes if worker nodes share a common L2 network segment.
autoDirectNodeRoutes: false

# -- Annotate k8s node upon initialization with Cilium's metadata.
annotateK8sNode: false

azure:
  # -- Enable Azure integration.
  # Note that this is incompatible with AKS clusters created in BYOCNI mode: use
  # AKS BYOCNI integration (`aksbyocni.enabled`) instead.
  enabled: false
  # usePrimaryAddress: false
  # resourceGroup: group1
  # subscriptionID: 00000000-0000-0000-0000-000000000000
  # tenantID: 00000000-0000-0000-0000-000000000000
  # clientID: 00000000-0000-0000-0000-000000000000
  # clientSecret: 00000000-0000-0000-0000-000000000000
  # userAssignedIdentityID: 00000000-0000-0000-0000-000000000000

alibabacloud:
  # -- Enable AlibabaCloud ENI integration
  enabled: false

# -- Enable bandwidth manager to optimize TCP and UDP workloads and allow
# for rate-limiting traffic from individual Pods with EDT (Earliest Departure
# Time) through the "kubernetes.io/egress-bandwidth" Pod annotation.
bandwidthManager:
  # -- Enable bandwidth manager infrastructure (also prerequirement for BBR)
  enabled: false
  # -- Activate BBR TCP congestion control for Pods
  bbr: false

# -- Configure standalone NAT46/NAT64 gateway
nat46x64Gateway:
  # -- Enable RFC8215-prefixed translation
  enabled: false

# -- EnableHighScaleIPcache enables the special ipcache mode for high scale
# clusters. The ipcache content will be reduced to the strict minimum and
# traffic will be encapsulated to carry security identities.
highScaleIPcache:
  # -- Enable the high scale mode for the ipcache.
  enabled: false

# -- Configure L2 announcements
l2announcements:
  # -- Enable L2 announcements
  enabled: false
  # -- If a lease is not renewed for X duration, the current leader is considered dead, a new leader is picked
  # leaseDuration: 15s
  # -- The interval at which the leader will renew the lease
  # leaseRenewDeadline: 5s
  # -- The timeout between retries if renewal fails
  # leaseRetryPeriod: 2s

# -- Configure L2 pod announcements
l2podAnnouncements:
  # -- Enable L2 pod announcements
  enabled: false
  # -- Interface used for sending Gratuitous ARP pod announcements
  interface: "eth0"

# -- Configure BGP
bgp:
  # -- Enable BGP support inside Cilium; embeds a new ConfigMap for BGP inside
  # cilium-agent and cilium-operator
  enabled: false
  announce:
    # -- Enable allocation and announcement of service LoadBalancer IPs
    loadbalancerIP: false
    # -- Enable announcement of node pod CIDR
    podCIDR: false

# -- This feature set enables virtual BGP routers to be created via
# CiliumBGPPeeringPolicy CRDs.
bgpControlPlane:
  # -- Enables the BGP control plane.
  enabled: false
  # -- SecretsNamespace is the namespace which BGP support will retrieve secrets from.
  secretsNamespace:
    # -- Create secrets namespace for BGP secrets.
    create: true
    # -- The name of the secret namespace to which Cilium agents are given read access
    name: cilium-bgp-secrets

pmtuDiscovery:
  # -- Enable path MTU discovery to send ICMP fragmentation-needed replies to
  # the client.
  enabled: false

bpf:
  autoMount:
    # -- Enable automatic mount of BPF filesystem
    # When `autoMount` is enabled, the BPF filesystem is mounted at
    # `bpf.root` path on the underlying host and inside the cilium agent pod.
    # If users disable `autoMount`, it's expected that users have mounted
    # bpffs filesystem at the specified `bpf.root` volume, and then the
    # volume will be mounted inside the cilium agent pod at the same path.
    enabled: true
  # -- Configure the mount point for the BPF filesystem
  root: /sys/fs/bpf

  # -- Enables pre-allocation of eBPF map values. This increases
  # memory usage but can reduce latency.
  preallocateMaps: false

  # -- (int) Configure the maximum number of entries in auth map.
  # @default -- `524288`
  authMapMax: ~

  # -- (int) Configure the maximum number of entries in the TCP connection tracking
  # table.
  # @default -- `524288`
  ctTcpMax: ~

  # -- (int) Configure the maximum number of entries for the non-TCP connection
  # tracking table.
  # @default -- `262144`
  ctAnyMax: ~

  # -- Configure the maximum number of service entries in the
  # load balancer maps.
  lbMapMax: 65536

  # -- (int) Configure the maximum number of entries for the NAT table.
  # @default -- `524288`
  natMax: ~

  # -- (int) Configure the maximum number of entries for the neighbor table.
  # @default -- `524288`
  neighMax: ~

  # -- Configure the maximum number of entries in endpoint policy map (per endpoint).
  policyMapMax: 16384

  # -- (float64) Configure auto-sizing for all BPF maps based on available memory.
  # ref: https://docs.cilium.io/en/stable/network/ebpf/maps/
  # @default -- `0.0025`
  mapDynamicSizeRatio: ~

  # -- Configure the level of aggregation for monitor notifications.
  # Valid options are none, low, medium, maximum.
  monitorAggregation: medium

  # -- Configure the typical time between monitor notifications for
  # active connections.
  monitorInterval: "5s"

  # -- Configure which TCP flags trigger notifications when seen for the
  # first time in a connection.
  monitorFlags: "all"

  # -- Allow cluster external access to ClusterIP services.
  lbExternalClusterIP: false

  # -- (bool) Enable native IP masquerade support in eBPF
  # @default -- `false`
  masquerade: ~

  # -- (bool) Configure whether direct routing mode should route traffic via
  # host stack (true) or directly and more efficiently out of BPF (false) if
  # the kernel supports it. The latter has the implication that it will also
  # bypass netfilter in the host namespace.
  # @default -- `false`
  hostLegacyRouting: ~

  # -- (bool) Configure the eBPF-based TPROXY to reduce reliance on iptables rules
  # for implementing Layer 7 policy.
  # @default -- `false`
  tproxy: ~

  # -- (list) Configure explicitly allowed VLAN id's for bpf logic bypass.
  # [0] will allow all VLAN id's without any filtering.
  # @default -- `[]`
  vlanBypass: ~

# -- Enable BPF clock source probing for more efficient tick retrieval.
bpfClockProbe: false

# -- Clean all eBPF datapath state from the initContainer of the cilium-agent
# DaemonSet.
#
# WARNING: Use with care!
cleanBpfState: false

# -- Clean all local Cilium state from the initContainer of the cilium-agent
# DaemonSet. Implies cleanBpfState: true.
#
# WARNING: Use with care!
cleanState: false

# -- Wait for KUBE-PROXY-CANARY iptables rule to appear in "wait-for-kube-proxy"
# init container before launching cilium-agent.
# More context can be found in the commit message of below PR
# https://github.com/cilium/cilium/pull/20123
waitForKubeProxy: false

cni:
  # -- Install the CNI configuration and binary files into the filesystem.
  install: true

  # -- Remove the CNI configuration and binary files on agent shutdown. Enable this
  # if you're removing Cilium from the cluster. Disable this to prevent the CNI
  # configuration file from being removed during agent upgrade, which can cause
  # nodes to go unmanageable.
  uninstall: false

  # -- Configure chaining on top of other CNI plugins. Possible values:
  # - none
  # - aws-cni
  # - flannel
  # - generic-veth
  # - portmap
  chainingMode: ~

  # -- A CNI network name in to which the Cilium plugin should be added as a chained plugin.
  # This will cause the agent to watch for a CNI network with this network name. When it is
  # found, this will be used as the basis for Cilium's CNI configuration file. If this is
  # set, it assumes a chaining mode of generic-veth. As a special case, a chaining mode
  # of aws-cni implies a chainingTarget of aws-cni.
  chainingTarget: ~

  # -- Make Cilium take ownership over the `/etc/cni/net.d` directory on the
  # node, renaming all non-Cilium CNI configurations to `*.cilium_bak`.
  # This ensures no Pods can be scheduled using other CNI plugins during Cilium
  # agent downtime.
  exclusive: true

  # -- Configure the log file for CNI logging with retention policy of 7 days.
  # Disable CNI file logging by setting this field to empty explicitly.
  logFile: /var/run/cilium/cilium-cni.log

  # -- Skip writing of the CNI configuration. This can be used if
  # writing of the CNI configuration is performed by external automation.
  customConf: false

  # -- Configure the path to the CNI configuration directory on the host.
  confPath: /etc/cni/net.d

  # -- Configure the path to the CNI binary directory on the host.
  binPath: /opt/cni/bin

  # -- Specify the path to a CNI config to read from on agent start.
  # This can be useful if you want to manage your CNI
  # configuration outside of a Kubernetes environment. This parameter is
  # mutually exclusive with the 'cni.configMap' parameter. The agent will
  # write this to 05-cilium.conflist on startup.
  # readCniConf: /host/etc/cni/net.d/05-sample.conflist.input

  # -- When defined, configMap will mount the provided value as ConfigMap and
  # interpret the cniConf variable as CNI configuration file and write it
  # when the agent starts up
  # configMap: cni-configuration

  # -- Configure the key in the CNI ConfigMap to read the contents of
  # the CNI configuration from.
  configMapKey: cni-config

  # -- Configure the path to where to mount the ConfigMap inside the agent pod.
  confFileMountPath: /tmp/cni-configuration

  # -- Configure the path to where the CNI configuration directory is mounted
  # inside the agent pod.
  hostConfDirMountPath: /host/etc/cni/net.d

# -- (string) Configure how frequently garbage collection should occur for the datapath
# connection tracking table.
# @default -- `"0s"`
conntrackGCInterval: ""

# -- (string) Configure the maximum frequency for the garbage collection of the
# connection tracking table. Only affects the automatic computation for the frequency
# and has no effect when 'conntrackGCInterval' is set. This can be set to more frequently
# clean up unused identities created from ToFQDN policies.
conntrackGCMaxInterval: ""

# -- Configure container runtime specific integration.
# Deprecated in favor of bpf.autoMount.enabled. To be removed in 1.15.
containerRuntime:
  # -- Enables specific integrations for container runtimes.
  # Supported values:
  # - crio
  # - none
  integration: none

# -- (string) Configure timeout in which Cilium will exit if CRDs are not available
# @default -- `"5m"`
crdWaitTimeout: ""

# -- Tail call hooks for custom eBPF programs.
customCalls:
  # -- Enable tail call hooks for custom eBPF programs.
  enabled: false

daemon:
  # -- Configure where Cilium runtime state should be stored.
  runPath: "/var/run/cilium"

  # -- Configure a custom list of possible configuration override sources
  # The default is "config-map:cilium-config,cilium-node-config". For supported
  # values, see the help text for the build-config subcommand.
  # Note that this value should be a comma-separated string.
  configSources: ~

  # -- allowedConfigOverrides is a list of config-map keys that can be overridden.
  # That is to say, if this value is set, config sources (excepting the first one) can
  # only override keys in this list.
  #
  # This takes precedence over blockedConfigOverrides.
  #
  # By default, all keys may be overridden. To disable overrides, set this to "none" or
  # change the configSources variable.
  allowedConfigOverrides: ~

  # -- blockedConfigOverrides is a list of config-map keys that may not be overridden.
  # In other words, if any of these keys appear in a configuration source excepting the
  # first one, they will be ignored
  #
  # This is ignored if allowedConfigOverrides is set.
  #
  # By default, all keys may be overridden.
  blockedConfigOverrides: ~

# -- Specify which network interfaces can run the eBPF datapath. This means
# that a packet sent from a pod to a destination outside the cluster will be
# masqueraded (to an output device IPv4 address), if the output device runs the
# program. When not specified, probing will automatically detect devices that have
# a non-local route. This should be used only when autodetection is not suitable.
# devices: ""

# -- Enables experimental support for the detection of new and removed datapath
# devices. When devices change the eBPF datapath is reloaded and services updated.
# If "devices" is set then only those devices, or devices matching a wildcard will
# be considered.
enableRuntimeDeviceDetection: false

# -- Chains to ignore when installing feeder rules.
# disableIptablesFeederRules: ""

# -- Limit iptables-based egress masquerading to interface selector.
# egressMasqueradeInterfaces: ""

# -- Whether to enable CNP status updates.
enableCnpStatusUpdates: false

# -- Configures the use of the KVStore to optimize Kubernetes event handling by
# mirroring it into the KVstore for reduced overhead in large clusters.
enableK8sEventHandover: false

# -- Enable setting identity mark for local traffic.
# enableIdentityMark: true

# -- Enable Kubernetes EndpointSlice feature in Cilium if the cluster supports it.
# enableK8sEndpointSlice: true

# -- Enable CiliumEndpointSlice feature.
enableCiliumEndpointSlice: false

envoyConfig:
  # -- Enable CiliumEnvoyConfig CRD
  # CiliumEnvoyConfig CRD can also be implicitly enabled by other options.
  enabled: false

  # -- SecretsNamespace is the namespace in which envoy SDS will retrieve secrets from.
  secretsNamespace:
    # -- Create secrets namespace for CiliumEnvoyConfig CRDs.
    create: true

    # -- The name of the secret namespace to which Cilium agents are given read access.
    name: cilium-secrets

ingressController:
  # -- Enable cilium ingress controller
  # This will automatically set enable-envoy-config as well.
  enabled: false

  # -- Set cilium ingress controller to be the default ingress controller
  # This will let cilium ingress controller route entries without ingress class set
  default: false

  # -- Default ingress load balancer mode
  # Supported values: shared, dedicated
  # For granular control, use the following annotations on the ingress resource
  # ingress.cilium.io/loadbalancer-mode: shared|dedicated,
  loadbalancerMode: dedicated

  # -- Enforce https for host having matching TLS host in Ingress.
  # Incoming traffic to http listener will return 308 http error code with respective location in header.
  enforceHttps: true

  # -- Enable proxy protocol for all Ingress listeners. Note that _only_ Proxy protocol traffic will be accepted once this is enabled.
  enableProxyProtocol: false

  # -- IngressLBAnnotations are the annotation prefixes, which are used to filter annotations to propagate
  # from Ingress to the Load Balancer service
  ingressLBAnnotationPrefixes: ['service.beta.kubernetes.io', 'service.kubernetes.io', 'cloud.google.com']

  # -- Default secret namespace for ingresses without .spec.tls[].secretName set.
  defaultSecretNamespace:

  # -- Default secret name for ingresses without .spec.tls[].secretName set.
  defaultSecretName:

  # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
  secretsNamespace:
    # -- Create secrets namespace for Ingress.
    create: true

    # -- Name of Ingress secret namespace.
    name: cilium-secrets

    # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
    # If disabled, TLS secrets must be maintained externally.
    sync: true

  # -- Load-balancer service in shared mode.
  # This is a single load-balancer service for all Ingress resources.
  service:
    # -- Service name
    name: cilium-ingress
    # -- Labels to be added for the shared LB service
    labels: {}
    # -- Annotations to be added for the shared LB service
    annotations: {}
    # -- Service type for the shared LB service
    type: LoadBalancer
    # -- Configure a specific nodePort for insecure HTTP traffic on the shared LB service
    insecureNodePort: ~
    # -- Configure a specific nodePort for secure HTTPS traffic on the shared LB service
    secureNodePort: ~
    # -- Configure a specific loadBalancerClass on the shared LB service (requires Kubernetes 1.24+)
    loadBalancerClass: ~
    # -- Configure a specific loadBalancerIP on the shared LB service
    loadBalancerIP: ~
    # -- Configure if node port allocation is required for LB service
    # ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation
    allocateLoadBalancerNodePorts: ~

gatewayAPI:
  # -- Enable support for Gateway API in cilium
  # This will automatically set enable-envoy-config as well.
  enabled: false

  # -- SecretsNamespace is the namespace in which envoy SDS will retrieve TLS secrets from.
  secretsNamespace:
    # -- Create secrets namespace for Gateway API.
    create: true

    # -- Name of Gateway API secret namespace.
    name: cilium-secrets

    # -- Enable secret sync, which will make sure all TLS secrets used by Ingress are synced to secretsNamespace.name.
    # If disabled, TLS secrets must be maintained externally.
    sync: true

# -- Enables the fallback compatibility solution for when the xt_socket kernel
# module is missing and it is needed for the datapath L7 redirection to work
# properly. See documentation for details on when this can be disabled:
# https://docs.cilium.io/en/stable/operations/system_requirements/#linux-kernel.
enableXTSocketFallback: true

encryption:
  # -- Enable transparent network encryption.
  enabled: false

  # -- Encryption method. Can be either ipsec or wireguard.
  type: ipsec

  # -- Enable encryption for pure node to node traffic.
  # This option is only effective when encryption.type is set to "wireguard".
  nodeEncryption: false

  # -- Configure the WireGuard Pod2Pod strict mode.
  strictMode:
    # -- Enable WireGuard Pod2Pod strict mode.
    enabled: false

    # -- CIDR for the WireGuard Pod2Pod strict mode.
    cidr: ""

    # -- Allow dynamic lookup of remote node identities.
    # This is required when tunneling is used or direct routing is used and the node CIDR and pod CIDR overlap.
    allowRemoteNodeIdentities: false

  ipsec:
    # -- Name of the key file inside the Kubernetes secret configured via secretName.
    keyFile: ""

    # -- Path to mount the secret inside the Cilium pod.
    mountPath: ""

    # -- Name of the Kubernetes secret containing the encryption keys.
    secretName: ""

    # -- The interface to use for encrypted traffic.
    interface: ""

    # -- Enable the key watcher. If disabled, a restart of the agent will be
    # necessary on key rotations.
    keyWatcher: true

    # -- Maximum duration of the IPsec key rotation. The previous key will be
    # removed after that delay.
    keyRotationDuration: "5m"

  wireguard:
    # -- Enables the fallback to the user-space implementation.
    userspaceFallback: false
    # -- Controls Wireguard PersistentKeepalive option. Set 0s to disable.
    persistentKeepalive: 0s

  # -- Deprecated in favor of encryption.ipsec.keyFile. To be removed in 1.15.
  # Name of the key file inside the Kubernetes secret configured via secretName.
  # This option is only effective when encryption.type is set to ipsec.
  keyFile: keys

  # -- Deprecated in favor of encryption.ipsec.mountPath. To be removed in 1.15.
  # Path to mount the secret inside the Cilium pod.
  # This option is only effective when encryption.type is set to ipsec.
  mountPath: /etc/ipsec

  # -- Deprecated in favor of encryption.ipsec.secretName. To be removed in 1.15.
  # Name of the Kubernetes secret containing the encryption keys.
  # This option is only effective when encryption.type is set to ipsec.
  secretName: cilium-ipsec-keys

  # -- Deprecated in favor of encryption.ipsec.interface. To be removed in 1.15.
  # The interface to use for encrypted traffic.
  # This option is only effective when encryption.type is set to ipsec.
  interface: ""

endpointHealthChecking:
|
|
# -- Enable connectivity health checking between virtual endpoints.
|
|
enabled: true
|
|
|
|
# -- Enable endpoint status.
|
|
# Status can be: policy, health, controllers, log and / or state. For 2 or more options use a space.
|
|
endpointStatus:
|
|
enabled: false
|
|
status: ""
|
|
|
|
endpointRoutes:
|
|
# -- Enable use of per endpoint routes instead of routing via
|
|
# the cilium_host interface.
|
|
enabled: false
|
|
|
|
k8sNetworkPolicy:
|
|
# -- Enable support for K8s NetworkPolicy
|
|
enabled: true
|
|
|
|
eni:
|
|
# -- Enable Elastic Network Interface (ENI) integration.
|
|
enabled: false
|
|
# -- Update ENI Adapter limits from the EC2 API
|
|
updateEC2AdapterLimitViaAPI: true
|
|
# -- Release IPs not used from the ENI
|
|
awsReleaseExcessIPs: false
|
|
# -- Enable ENI prefix delegation
|
|
awsEnablePrefixDelegation: false
|
|
# -- EC2 API endpoint to use
|
|
ec2APIEndpoint: ""
|
|
# -- Tags to apply to the newly created ENIs
|
|
eniTags: {}
|
|
# -- Interval for garbage collection of unattached ENIs. Set to "0s" to disable.
|
|
# @default -- `"5m"`
|
|
gcInterval: ""
|
|
# -- Additional tags attached to ENIs created by Cilium.
|
|
# Dangling ENIs with this tag will be garbage collected
|
|
# @default -- `{"io.cilium/cilium-managed":"true","io.cilium/cluster-name":"<auto-detected>"}`
|
|
gcTags: {}
|
|
# -- If using IAM role for Service Accounts will not try to
|
|
# inject identity values from cilium-aws kubernetes secret.
|
|
# Adds annotation to service account if managed by Helm.
|
|
# See https://github.com/aws/amazon-eks-pod-identity-webhook
|
|
iamRole: ""
|
|
# -- Filter via subnet IDs which will dictate which subnets are going to be used to create new ENIs
|
|
# Important note: This requires that each instance has an ENI with a matching subnet attached
|
|
# when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
|
|
# use the CNI configuration file settings (cni.customConf) instead.
|
|
subnetIDsFilter: []
|
|
# -- Filter via tags (k=v) which will dictate which subnets are going to be used to create new ENIs
|
|
# Important note: This requires that each instance has an ENI with a matching subnet attached
|
|
# when Cilium is deployed. If you only want to control subnets for ENIs attached by Cilium,
|
|
# use the CNI configuration file settings (cni.customConf) instead.
|
|
subnetTagsFilter: []
|
|
# -- Filter via AWS EC2 Instance tags (k=v) which will dictate which AWS EC2 Instances
|
|
# are going to be used to create new ENIs
|
|
instanceTagsFilter: []
|
|
|
|
externalIPs:
|
|
# -- Enable ExternalIPs service support.
|
|
enabled: false
|
|
|
|
# fragmentTracking enables IPv4 fragment tracking support in the datapath.
|
|
# fragmentTracking: true
|
|
|
|
gke:
|
|
# -- Enable Google Kubernetes Engine integration
|
|
enabled: false
|
|
|
|
# -- Enable connectivity health checking.
|
|
healthChecking: true
|
|
|
|
# -- TCP port for the agent health API. This is not the port for cilium-health.
|
|
healthPort: 9879
|
|
|
|
# -- Configure the host firewall.
|
|
hostFirewall:
|
|
# -- Enables the enforcement of host policies in the eBPF datapath.
|
|
enabled: false
|
|
|
|
hostPort:
|
|
# -- Enable hostPort service support.
|
|
enabled: false
|
|
|
|
# -- Configure socket LB
|
|
socketLB:
|
|
# -- Enable socket LB
|
|
enabled: false
|
|
|
|
# -- Disable socket lb for non-root ns. This is used to enable Istio routing rules.
|
|
# hostNamespaceOnly: false
|
|
|
|
# -- Configure certificate generation for Hubble integration.
|
|
# If hubble.tls.auto.method=cronJob, these values are used
|
|
# for the Kubernetes CronJob which will be scheduled regularly to
|
|
# (re)generate any certificates not provided manually.
|
|
certgen:
|
|
image:
|
|
override: ~
|
|
repository: "${CERTGEN_REPO}"
|
|
tag: "${CERTGEN_VERSION}"
|
|
digest: "${CERTGEN_DIGEST}"
|
|
useDigest: true
|
|
pullPolicy: "${PULL_POLICY}"
|
|
# -- Seconds after which the completed job pod will be deleted
|
|
ttlSecondsAfterFinished: 1800
|
|
# -- Labels to be added to hubble-certgen pods
|
|
podLabels: {}
|
|
# -- Annotations to be added to the hubble-certgen initial Job and CronJob
|
|
annotations:
|
|
job: {}
|
|
cronJob: {}
|
|
# -- Node tolerations for pod assignment on nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations: []
|
|
|
|
# -- Additional certgen volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional certgen volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
# -- Affinity for certgen
|
|
affinity: {}
|
|
|
|
hubble:
|
|
# -- Enable Hubble (true by default).
|
|
enabled: true
|
|
|
|
# -- Annotations to be added to all top-level hubble objects (resources under templates/hubble)
|
|
annotations: {}
|
|
|
|
# -- Buffer size of the channel Hubble uses to receive monitor events. If this
|
|
# value is not set, the queue size is set to the default monitor queue size.
|
|
# eventQueueSize: ""
|
|
|
|
# -- Number of recent flows for Hubble to cache. Defaults to 4095.
|
|
# Possible values are:
|
|
# 1, 3, 7, 15, 31, 63, 127, 255, 511, 1023,
|
|
# 2047, 4095, 8191, 16383, 32767, 65535
|
|
# eventBufferCapacity: "4095"
|
|
|
|
# -- Hubble metrics configuration.
|
|
# See https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics
|
|
# for more comprehensive documentation about Hubble metrics.
|
|
metrics:
|
|
# -- Configures the list of metrics to collect. If empty or null, metrics
|
|
# are disabled.
|
|
# Example:
|
|
#
|
|
# enabled:
|
|
# - dns:query;ignoreAAAA
|
|
# - drop
|
|
# - tcp
|
|
# - flow
|
|
# - icmp
|
|
# - http
|
|
#
|
|
# You can specify the list of metrics from the helm CLI:
|
|
#
|
|
# --set hubble.metrics.enabled="{dns:query;ignoreAAAA,drop,tcp,flow,icmp,http}"
|
|
#
|
|
enabled: ~
|
|
# -- Enables exporting hubble metrics in OpenMetrics format.
|
|
enableOpenMetrics: false
|
|
# -- Configure the port the hubble metric server listens on.
|
|
port: 9965
|
|
# -- Annotations to be added to hubble-metrics service.
|
|
serviceAnnotations: {}
|
|
serviceMonitor:
|
|
# -- Create ServiceMonitor resources for Prometheus Operator.
|
|
# This requires the prometheus CRDs to be available.
|
|
# ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
|
enabled: false
|
|
# -- Labels to add to ServiceMonitor hubble
|
|
labels: {}
|
|
# -- Annotations to add to ServiceMonitor hubble
|
|
annotations: {}
|
|
# -- jobLabel to add for ServiceMonitor hubble
|
|
jobLabel: ""
|
|
# -- Interval for scrape metrics.
|
|
interval: "10s"
|
|
# -- Relabeling configs for the ServiceMonitor hubble
|
|
relabelings:
|
|
- sourceLabels:
|
|
- __meta_kubernetes_pod_node_name
|
|
targetLabel: node
|
|
replacement: ${1}
|
|
# -- Metrics relabeling configs for the ServiceMonitor hubble
|
|
metricRelabelings: ~
|
|
# -- Grafana dashboards for hubble
|
|
# grafana can import dashboards based on the label and value
|
|
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
|
|
dashboards:
|
|
enabled: false
|
|
label: grafana_dashboard
|
|
namespace: ~
|
|
labelValue: "1"
|
|
annotations: {}
|
|
|
|
# -- Unix domain socket path to listen to when Hubble is enabled.
|
|
socketPath: /var/run/cilium/hubble.sock
|
|
|
|
# -- Enables redacting sensitive information present in Layer 7 flows.
|
|
redact:
|
|
enabled: false
|
|
http:
|
|
# -- Enables redacting URL query (GET) parameters.
|
|
# Example:
|
|
#
|
|
# redact:
|
|
# enabled: true
|
|
# http:
|
|
# urlQuery: true
|
|
#
|
|
# You can specify the options from the helm CLI:
|
|
#
|
|
# --set hubble.redact.enabled="true"
|
|
# --set hubble.redact.http.urlQuery="true"
|
|
urlQuery: false
|
|
headers:
|
|
# -- List of HTTP headers to allow: headers not matching will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present.
|
|
# Example:
|
|
# redact:
|
|
# enabled: true
|
|
# http:
|
|
# headers:
|
|
# allow:
|
|
# - traceparent
|
|
# - tracestate
|
|
# - Cache-Control
|
|
#
|
|
# You can specify the options from the helm CLI:
|
|
# --set hubble.redact.enabled="true"
|
|
# --set hubble.redact.http.headers.allow="traceparent,tracestate,Cache-Control"
|
|
allow: []
|
|
# -- List of HTTP headers to deny: matching headers will be redacted. Note: `allow` and `deny` lists cannot be used both at the same time, only one can be present.
|
|
# Example:
|
|
# redact:
|
|
# enabled: true
|
|
# http:
|
|
# headers:
|
|
# deny:
|
|
# - Authorization
|
|
# - Proxy-Authorization
|
|
#
|
|
# You can specify the options from the helm CLI:
|
|
# --set hubble.redact.enabled="true"
|
|
# --set hubble.redact.http.headers.deny="Authorization,Proxy-Authorization"
|
|
deny: []
|
|
kafka:
|
|
# -- Enables redacting Kafka's API key.
|
|
# Example:
|
|
#
|
|
# redact:
|
|
# enabled: true
|
|
# kafka:
|
|
# apiKey: true
|
|
#
|
|
# You can specify the options from the helm CLI:
|
|
#
|
|
# --set hubble.redact.enabled="true"
|
|
# --set hubble.redact.kafka.apiKey="true"
|
|
apiKey: false
|
|
|
|
# -- An additional address for Hubble to listen to.
|
|
# Set this field to ":4244" if you are enabling Hubble Relay, as it assumes that
|
|
# Hubble is listening on port 4244.
|
|
listenAddress: ":4244"
|
|
# -- Whether Hubble should prefer to announce IPv6 or IPv4 addresses if both are available.
|
|
preferIpv6: false
|
|
# -- (bool) Skip Hubble events with unknown cgroup ids
|
|
# @default -- `true`
|
|
skipUnknownCGroupIDs: ~
|
|
|
|
peerService:
|
|
# -- Service Port for the Peer service.
|
|
# If not set, it is dynamically assigned to port 443 if TLS is enabled and to
|
|
# port 80 if not.
|
|
# servicePort: 80
|
|
# -- Target Port for the Peer service, must match the hubble.listenAddress'
|
|
# port.
|
|
targetPort: 4244
|
|
# -- The cluster domain to use to query the Hubble Peer service. It should
|
|
# be the local cluster.
|
|
clusterDomain: cluster.local
|
|
# -- TLS configuration for Hubble
|
|
tls:
|
|
# -- Enable mutual TLS for listenAddress. Setting this value to false is
|
|
# highly discouraged as the Hubble API provides access to potentially
|
|
# sensitive network flow metadata and is exposed on the host network.
|
|
enabled: true
|
|
# -- Configure automatic TLS certificates generation.
|
|
auto:
|
|
# -- Auto-generate certificates.
|
|
# When set to true, automatically generate a CA and certificates to
|
|
# enable mTLS between Hubble server and Hubble Relay instances. If set to
|
|
# false, the certs for Hubble server need to be provided by setting
|
|
# appropriate values below.
|
|
enabled: true
|
|
# -- Set the method to auto-generate certificates. Supported values:
|
|
# - helm: This method uses Helm to generate all certificates.
|
|
# - cronJob: This method uses a Kubernetes CronJob to generate any
|
|
# certificates not provided by the user at installation
|
|
# time.
|
|
# - certmanager: This method uses cert-manager to generate & rotate certificates.
|
|
method: helm
|
|
# -- Generated certificates validity duration in days.
|
|
certValidityDuration: 1095
|
|
# -- Schedule for certificates regeneration (regardless of their expiration date).
|
|
# Only used if method is "cronJob". If nil, then no recurring job will be created.
|
|
# Instead, only the one-shot job is deployed to generate the certificates at
|
|
# installation time.
|
|
#
|
|
# Defaults to midnight of the first day of every fourth month. For syntax, see
|
|
# https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
|
|
schedule: "0 0 1 */4 *"
|
|
|
|
# [Example]
|
|
# certManagerIssuerRef:
|
|
# group: cert-manager.io
|
|
# kind: ClusterIssuer
|
|
# name: ca-issuer
|
|
# -- certmanager issuer used when hubble.tls.auto.method=certmanager.
|
|
certManagerIssuerRef: {}
|
|
|
|
# -- base64 encoded PEM values for the Hubble server certificate and private key
|
|
server:
|
|
cert: ""
|
|
key: ""
|
|
# -- Extra DNS names added to certificate when it's auto generated
|
|
extraDnsNames: []
|
|
# -- Extra IP addresses added to certificate when it's auto generated
|
|
extraIpAddresses: []
|
|
|
|
relay:
|
|
# -- Enable Hubble Relay (requires hubble.enabled=true)
|
|
enabled: false
|
|
|
|
# -- Roll out Hubble Relay pods automatically when configmap is updated.
|
|
rollOutPods: false
|
|
|
|
# -- Hubble-relay container image.
|
|
image:
|
|
override: ~
|
|
repository: "${HUBBLE_RELAY_REPO}"
|
|
tag: "${CILIUM_VERSION}"
|
|
# hubble-relay-digest
|
|
digest: ${HUBBLE_RELAY_DIGEST}
|
|
useDigest: ${USE_DIGESTS}
|
|
pullPolicy: "${PULL_POLICY}"
|
|
|
|
# -- Specifies the resources for the hubble-relay pods
|
|
resources: {}
|
|
|
|
# -- Number of replicas run for the hubble-relay deployment.
|
|
replicas: 1
|
|
|
|
# -- Affinity for hubble-relay
|
|
affinity:
|
|
podAffinity:
|
|
requiredDuringSchedulingIgnoredDuringExecution:
|
|
- topologyKey: kubernetes.io/hostname
|
|
labelSelector:
|
|
matchLabels:
|
|
k8s-app: cilium
|
|
|
|
# -- Pod topology spread constraints for hubble-relay
|
|
topologySpreadConstraints: []
|
|
# - maxSkew: 1
|
|
# topologyKey: topology.kubernetes.io/zone
|
|
# whenUnsatisfiable: DoNotSchedule
|
|
|
|
# -- Node labels for pod assignment
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
|
|
nodeSelector:
|
|
kubernetes.io/os: linux
|
|
|
|
# -- Node tolerations for pod assignment on nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations: []
|
|
|
|
# -- Additional hubble-relay environment variables.
|
|
extraEnv: []
|
|
|
|
# -- Annotations to be added to all top-level hubble-relay objects (resources under templates/hubble-relay)
|
|
annotations: {}
|
|
|
|
# -- Annotations to be added to hubble-relay pods
|
|
podAnnotations: {}
|
|
|
|
# -- Labels to be added to hubble-relay pods
|
|
podLabels: {}
|
|
|
|
# PodDisruptionBudget settings
|
|
podDisruptionBudget:
|
|
# -- enable PodDisruptionBudget
|
|
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
|
|
enabled: false
|
|
# -- Minimum number/percentage of pods that should remain scheduled.
|
|
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
|
|
minAvailable: null
|
|
# -- Maximum number/percentage of pods that may be made unavailable
|
|
maxUnavailable: 1
|
|
|
|
# -- The priority class to use for hubble-relay
|
|
priorityClassName: ""
|
|
|
|
# -- Configure termination grace period for hubble relay Deployment.
|
|
terminationGracePeriodSeconds: 1
|
|
|
|
# -- hubble-relay update strategy
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
rollingUpdate:
|
|
maxUnavailable: 1
|
|
|
|
# -- hubble-relay pod security context
|
|
podSecurityContext:
|
|
fsGroup: 65532
|
|
|
|
# -- hubble-relay container security context
|
|
securityContext:
|
|
# readOnlyRootFilesystem: true
|
|
runAsNonRoot: true
|
|
runAsUser: 65532
|
|
runAsGroup: 65532
|
|
capabilities:
|
|
drop:
|
|
- ALL
|
|
|
|
# -- hubble-relay service configuration.
|
|
service:
|
|
# --- The type of service used for Hubble Relay access, either ClusterIP or NodePort.
|
|
type: ClusterIP
|
|
# --- The port to use when the service type is set to NodePort.
|
|
nodePort: 31234
|
|
|
|
# -- Host to listen to. Specify an empty string to bind to all the interfaces.
|
|
listenHost: ""
|
|
|
|
# -- Port to listen to.
|
|
listenPort: "4245"
|
|
|
|
# -- TLS configuration for Hubble Relay
|
|
tls:
|
|
# -- base64 encoded PEM values for the hubble-relay client certificate and private key
|
|
# This keypair is presented to Hubble server instances for mTLS
|
|
# authentication and is required when hubble.tls.enabled is true.
|
|
# These values need to be set manually if hubble.tls.auto.enabled is false.
|
|
client:
|
|
cert: ""
|
|
key: ""
|
|
# -- base64 encoded PEM values for the hubble-relay server certificate and private key
|
|
server:
|
|
# When set to true, enable TLS on for Hubble Relay server
|
|
# (ie: for clients connecting to the Hubble Relay API).
|
|
enabled: false
|
|
# When set to true enforces mutual TLS between Hubble Relay server and its clients.
|
|
# False allows non-mutual TLS connections.
|
|
# This option has no effect when TLS is disabled.
|
|
mtls: false
|
|
# These values need to be set manually if hubble.tls.auto.enabled is false.
|
|
cert: ""
|
|
key: ""
|
|
# -- Extra DNS names added to the certificate when it's auto-generated
|
|
extraDnsNames: []
|
|
# -- Extra IP addresses added to the certificate when it's auto-generated
|
|
extraIpAddresses: []
|
|
# DNS name used by the backend to connect to the relay
|
|
# This is a simple workaround as the relay certificates are currently hardcoded to
|
|
# *.hubble-relay.cilium.io
|
|
# See https://github.com/cilium/cilium/pull/28709#discussion_r1371792546
|
|
# For GKE Dataplane V2 this should be set to relay.kube-system.svc.cluster.local
|
|
relayName: "ui.hubble-relay.cilium.io"
|
|
|
|
# -- Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s").
|
|
dialTimeout: ~
|
|
|
|
# -- Backoff duration to retry connecting to the local hubble instance in case of failure (e.g. "30s").
|
|
retryTimeout: ~
|
|
|
|
# -- Max number of flows that can be buffered for sorting before being sent to the
|
|
# client (per request) (e.g. 100).
|
|
sortBufferLenMax: ~
|
|
|
|
# -- When the per-request flows sort buffer is not full, a flow is drained every
|
|
# time this timeout is reached (only affects requests in follow-mode) (e.g. "1s").
|
|
sortBufferDrainTimeout: ~
|
|
|
|
# -- Port to use for the k8s service backed by hubble-relay pods.
|
|
# If not set, it is dynamically assigned to port 443 if TLS is enabled and to
|
|
# port 80 if not.
|
|
# servicePort: 80
|
|
|
|
# -- Enable prometheus metrics for hubble-relay on the configured port at
|
|
# /metrics
|
|
prometheus:
|
|
enabled: false
|
|
port: 9966
|
|
serviceMonitor:
|
|
# -- Enable service monitors.
|
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
|
enabled: false
|
|
# -- Labels to add to ServiceMonitor hubble-relay
|
|
labels: {}
|
|
# -- Annotations to add to ServiceMonitor hubble-relay
|
|
annotations: {}
|
|
# -- Interval for scrape metrics.
|
|
interval: "10s"
|
|
# -- Specify the Kubernetes namespace where Prometheus expects to find
|
|
# service monitors configured.
|
|
# namespace: ""
|
|
# -- Relabeling configs for the ServiceMonitor hubble-relay
|
|
relabelings: ~
|
|
# -- Metrics relabeling configs for the ServiceMonitor hubble-relay
|
|
metricRelabelings: ~
|
|
|
|
gops:
|
|
# -- Enable gops for hubble-relay
|
|
enabled: true
|
|
# -- Configure gops listen port for hubble-relay
|
|
port: 9893
|
|
|
|
pprof:
|
|
# -- Enable pprof for hubble-relay
|
|
enabled: false
|
|
# -- Configure pprof listen address for hubble-relay
|
|
address: localhost
|
|
# -- Configure pprof listen port for hubble-relay
|
|
port: 6062
|
|
|
|
ui:
|
|
# -- Whether to enable the Hubble UI.
|
|
enabled: false
|
|
|
|
standalone:
|
|
# -- When true, it will allow installing the Hubble UI only, without checking dependencies.
|
|
# It is useful if a cluster already has cilium and Hubble relay installed and you just
|
|
# want Hubble UI to be deployed.
|
|
# When installed via helm, installing UI should be done via `helm upgrade` and when installed via the cilium cli, then `cilium hubble enable --ui`
|
|
enabled: false
|
|
|
|
tls:
|
|
# -- When deploying Hubble UI in standalone, with tls enabled for Hubble relay, it is required
|
|
# to provide a volume for mounting the client certificates.
|
|
certsVolume: {}
|
|
# projected:
|
|
# defaultMode: 0400
|
|
# sources:
|
|
# - secret:
|
|
# name: hubble-ui-client-certs
|
|
# items:
|
|
# - key: tls.crt
|
|
# path: client.crt
|
|
# - key: tls.key
|
|
# path: client.key
|
|
# - key: ca.crt
|
|
# path: hubble-relay-ca.crt
|
|
|
|
# -- Roll out Hubble-ui pods automatically when configmap is updated.
|
|
rollOutPods: false
|
|
|
|
tls:
|
|
# -- base64 encoded PEM values used to connect to hubble-relay
|
|
# This keypair is presented to Hubble Relay instances for mTLS
|
|
# authentication and is required when hubble.relay.tls.server.enabled is true.
|
|
# These values need to be set manually if hubble.tls.auto.enabled is false.
|
|
client:
|
|
cert: ""
|
|
key: ""
|
|
|
|
backend:
|
|
# -- Hubble-ui backend image.
|
|
image:
|
|
override: ~
|
|
repository: "${HUBBLE_UI_BACKEND_REPO}"
|
|
tag: "${HUBBLE_UI_BACKEND_VERSION}"
|
|
digest: "${HUBBLE_UI_BACKEND_DIGEST}"
|
|
useDigest: true
|
|
pullPolicy: "${PULL_POLICY}"
|
|
|
|
# -- Hubble-ui backend security context.
|
|
securityContext: {}
|
|
|
|
# -- Additional hubble-ui backend environment variables.
|
|
extraEnv: []
|
|
|
|
# -- Additional hubble-ui backend volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional hubble-ui backend volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
livenessProbe:
|
|
# -- Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
|
|
enabled: false
|
|
|
|
readinessProbe:
|
|
# -- Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+)
|
|
enabled: false
|
|
|
|
# -- Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment.
|
|
resources: {}
|
|
# limits:
|
|
# cpu: 1000m
|
|
# memory: 1024M
|
|
# requests:
|
|
# cpu: 100m
|
|
# memory: 64Mi
|
|
|
|
frontend:
|
|
# -- Hubble-ui frontend image.
|
|
image:
|
|
override: ~
|
|
repository: "${HUBBLE_UI_FRONTEND_REPO}"
|
|
tag: "${HUBBLE_UI_FRONTEND_VERSION}"
|
|
digest: "${HUBBLE_UI_FRONTEND_DIGEST}"
|
|
useDigest: true
|
|
pullPolicy: "${PULL_POLICY}"
|
|
|
|
# -- Hubble-ui frontend security context.
|
|
securityContext: {}
|
|
|
|
# -- Additional hubble-ui frontend environment variables.
|
|
extraEnv: []
|
|
|
|
# -- Additional hubble-ui frontend volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional hubble-ui frontend volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
# -- Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment.
|
|
resources: {}
|
|
# limits:
|
|
# cpu: 1000m
|
|
# memory: 1024M
|
|
# requests:
|
|
# cpu: 100m
|
|
# memory: 64Mi
|
|
server:
|
|
# -- Controls server listener for ipv6
|
|
ipv6:
|
|
enabled: true
|
|
|
|
# -- The number of replicas of Hubble UI to deploy.
|
|
replicas: 1
|
|
|
|
# -- Annotations to be added to all top-level hubble-ui objects (resources under templates/hubble-ui)
|
|
annotations: {}
|
|
|
|
# -- Annotations to be added to hubble-ui pods
|
|
podAnnotations: {}
|
|
|
|
# -- Labels to be added to hubble-ui pods
|
|
podLabels: {}
|
|
|
|
# PodDisruptionBudget settings
|
|
podDisruptionBudget:
|
|
# -- enable PodDisruptionBudget
|
|
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
|
|
enabled: false
|
|
# -- Minimum number/percentage of pods that should remain scheduled.
|
|
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
|
|
minAvailable: null
|
|
# -- Maximum number/percentage of pods that may be made unavailable
|
|
maxUnavailable: 1
|
|
|
|
# -- Affinity for hubble-ui
|
|
affinity: {}
|
|
|
|
# -- Pod topology spread constraints for hubble-ui
|
|
topologySpreadConstraints: []
|
|
# - maxSkew: 1
|
|
# topologyKey: topology.kubernetes.io/zone
|
|
# whenUnsatisfiable: DoNotSchedule
|
|
|
|
# -- Node labels for pod assignment
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
|
|
nodeSelector:
|
|
kubernetes.io/os: linux
|
|
|
|
# -- Node tolerations for pod assignment on nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations: []
|
|
|
|
# -- The priority class to use for hubble-ui
|
|
priorityClassName: ""
|
|
|
|
# -- hubble-ui update strategy.
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
rollingUpdate:
|
|
maxUnavailable: 1
|
|
|
|
# -- Security context to be added to Hubble UI pods
|
|
securityContext:
|
|
runAsUser: 1001
|
|
runAsGroup: 1001
|
|
fsGroup: 1001
|
|
|
|
# -- hubble-ui service configuration.
|
|
service:
|
|
# -- Annotations to be added for the Hubble UI service
|
|
annotations: {}
|
|
# --- The type of service used for Hubble UI access, either ClusterIP or NodePort.
|
|
type: ClusterIP
|
|
# --- The port to use when the service type is set to NodePort.
|
|
nodePort: 31235
|
|
|
|
# -- Defines base url prefix for all hubble-ui http requests.
|
|
# It needs to be changed in case if ingress for hubble-ui is configured under some sub-path.
|
|
# Trailing `/` is required for custom path, ex. `/service-map/`
|
|
baseUrl: "/"
|
|
|
|
# -- hubble-ui ingress configuration.
|
|
ingress:
|
|
enabled: false
|
|
annotations: {}
|
|
# kubernetes.io/ingress.class: nginx
|
|
# kubernetes.io/tls-acme: "true"
|
|
className: ""
|
|
hosts:
|
|
- chart-example.local
|
|
labels: {}
|
|
tls: []
|
|
# - secretName: chart-example-tls
|
|
# hosts:
|
|
# - chart-example.local
|
|
|
|
# -- Method to use for identity allocation (`crd` or `kvstore`).
|
|
identityAllocationMode: "crd"
|
|
|
|
# -- (string) Time to wait before using new identity on endpoint identity change.
|
|
# @default -- `"5s"`
|
|
identityChangeGracePeriod: ""
|
|
|
|
# -- Install Iptables rules to skip netfilter connection tracking on all pod
|
|
# traffic. This option is only effective when Cilium is running in direct
|
|
# routing and full KPR mode. Moreover, this option cannot be enabled when Cilium
|
|
# is running in a managed Kubernetes environment or in a chained CNI setup.
|
|
installNoConntrackIptablesRules: false
|
|
|
|
ipam:
|
|
# -- Configure IP Address Management mode.
|
|
# ref: https://docs.cilium.io/en/stable/network/concepts/ipam/
|
|
mode: "cluster-pool"
|
|
# -- Maximum rate at which the CiliumNode custom resource is updated.
|
|
ciliumNodeUpdateRate: "15s"
|
|
operator:
|
|
# -- IPv4 CIDR list range to delegate to individual nodes for IPAM.
|
|
clusterPoolIPv4PodCIDRList: ["10.0.0.0/8"]
|
|
# -- IPv4 CIDR mask size to delegate to individual nodes for IPAM.
|
|
clusterPoolIPv4MaskSize: 24
|
|
# -- IPv6 CIDR list range to delegate to individual nodes for IPAM.
|
|
clusterPoolIPv6PodCIDRList: ["fd00::/104"]
|
|
# -- IPv6 CIDR mask size to delegate to individual nodes for IPAM.
|
|
clusterPoolIPv6MaskSize: 120
|
|
# -- IP pools to auto-create in multi-pool IPAM mode.
|
|
autoCreateCiliumPodIPPools: {}
|
|
# default:
|
|
# ipv4:
|
|
# cidrs:
|
|
# - 10.10.0.0/8
|
|
# maskSize: 24
|
|
# other:
|
|
# ipv6:
|
|
# cidrs:
|
|
# - fd00:100::/80
|
|
# maskSize: 96
|
|
# -- The maximum burst size when rate limiting access to external APIs.
|
|
# Also known as the token bucket capacity.
|
|
# @default -- `20`
|
|
externalAPILimitBurstSize: ~
|
|
# -- The maximum queries per second when rate limiting access to
|
|
# external APIs. Also known as the bucket refill rate, which is used to
|
|
# refill the bucket up to the burst size capacity.
|
|
# @default -- `4.0`
|
|
externalAPILimitQPS: ~
|
|
|
|
# -- The api-rate-limit option can be used to overwrite individual settings of the default configuration for rate limiting calls to the Cilium Agent API
|
|
apiRateLimit: ~
|
|
|
|
# -- Configure the eBPF-based ip-masq-agent
|
|
ipMasqAgent:
|
|
enabled: false
|
|
# the config of nonMasqueradeCIDRs
|
|
# config:
|
|
# nonMasqueradeCIDRs: []
|
|
# masqLinkLocal: false
|
|
# masqLinkLocalIPv6: false
|
|
|
|
# iptablesLockTimeout defines the iptables "--wait" option when invoked from Cilium.
|
|
# iptablesLockTimeout: "5s"
|
|
|
|
ipv4:
|
|
# -- Enable IPv4 support.
|
|
enabled: true
|
|
|
|
ipv6:
|
|
# -- Enable IPv6 support.
|
|
enabled: false
|
|
|
|
# -- Configure Kubernetes specific configuration
|
|
k8s: {}
|
|
# -- requireIPv4PodCIDR enables waiting for Kubernetes to provide the PodCIDR
|
|
# range via the Kubernetes node resource
|
|
# requireIPv4PodCIDR: false
|
|
|
|
# -- requireIPv6PodCIDR enables waiting for Kubernetes to provide the PodCIDR
|
|
# range via the Kubernetes node resource
|
|
# requireIPv6PodCIDR: false
|
|
|
|
# -- Keep the deprecated selector labels when deploying Cilium DaemonSet.
|
|
keepDeprecatedLabels: false
|
|
|
|
# -- Keep the deprecated probes when deploying Cilium DaemonSet
|
|
keepDeprecatedProbes: false
|
|
|
|
startupProbe:
|
|
# -- failure threshold of startup probe.
|
|
# 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
|
|
failureThreshold: 105
|
|
# -- interval between checks of the startup probe
|
|
periodSeconds: 2
|
|
livenessProbe:
|
|
# -- failure threshold of liveness probe
|
|
failureThreshold: 10
|
|
# -- interval between checks of the liveness probe
|
|
periodSeconds: 30
|
|
readinessProbe:
|
|
# -- failure threshold of readiness probe
|
|
failureThreshold: 3
|
|
# -- interval between checks of the readiness probe
|
|
periodSeconds: 30
|
|
|
|
# -- Configure the kube-proxy replacement in Cilium BPF datapath
|
|
# Valid options are "true", "false", "disabled" (deprecated), "partial" (deprecated), "strict" (deprecated).
|
|
# ref: https://docs.cilium.io/en/stable/network/kubernetes/kubeproxy-free/
|
|
#kubeProxyReplacement: "false"
|
|
|
|
# -- healthz server bind address for the kube-proxy replacement.
|
|
# To enable set the value to '0.0.0.0:10256' for all ipv4
|
|
# addresses and this '[::]:10256' for all ipv6 addresses.
|
|
# By default it is disabled.
|
|
kubeProxyReplacementHealthzBindAddr: ""
|
|
|
|
l2NeighDiscovery:
|
|
# -- Enable L2 neighbor discovery in the agent
|
|
enabled: true
|
|
# -- Override the agent's default neighbor resolution refresh period.
|
|
refreshPeriod: "30s"
|
|
|
|
# -- Enable Layer 7 network policy.
|
|
l7Proxy: true
|
|
|
|
# -- Enable Local Redirect Policy.
|
|
localRedirectPolicy: false
|
|
|
|
# To include or exclude matched resources from cilium identity evaluation
|
|
# labels: ""
|
|
|
|
# logOptions allows you to define logging options. eg:
|
|
# logOptions:
|
|
# format: json
|
|
|
|
# -- Enables periodic logging of system load
|
|
logSystemLoad: false
|
|
|
|
# -- Configure maglev consistent hashing
|
|
maglev: {}
|
|
# -- tableSize is the size (parameter M) for the backend table of one
|
|
# service entry
|
|
# tableSize:
|
|
|
|
# -- hashSeed is the cluster-wide base64 encoded seed for the hashing
|
|
# hashSeed:
|
|
|
|
# -- Enables masquerading of IPv4 traffic leaving the node from endpoints.
|
|
enableIPv4Masquerade: true
|
|
|
|
# -- Enables masquerading of IPv6 traffic leaving the node from endpoints.
|
|
enableIPv6Masquerade: true
|
|
|
|
# -- Enables masquerading to the source of the route for traffic leaving the node from endpoints.
|
|
enableMasqueradeRouteSource: false
|
|
|
|
# -- Enables IPv4 BIG TCP support which increases maximum IPv4 GSO/GRO limits for nodes and pods
|
|
enableIPv4BIGTCP: false
|
|
|
|
# -- Enables IPv6 BIG TCP support which increases maximum IPv6 GSO/GRO limits for nodes and pods
|
|
enableIPv6BIGTCP: false
|
|
|
|
egressGateway:
|
|
# -- Enables egress gateway to redirect and SNAT the traffic that leaves the
|
|
# cluster.
|
|
enabled: false
|
|
# -- Deprecated without a replacement necessary.
|
|
installRoutes: false
|
|
# -- Time between triggers of egress gateway state reconciliations
|
|
reconciliationTriggerInterval: 1s
|
|
# -- Maximum number of entries in egress gateway policy map
|
|
# maxPolicyEntries: 16384
|
|
|
|
vtep:
|
|
# -- Enables VXLAN Tunnel Endpoint (VTEP) Integration (beta) to allow
|
|
# Cilium-managed pods to talk to third party VTEP devices over Cilium tunnel.
|
|
enabled: false
|
|
|
|
# -- A space separated list of VTEP device endpoint IPs, for example "1.1.1.1 1.1.2.1"
|
|
endpoint: ""
|
|
# -- A space separated list of VTEP device CIDRs, for example "1.1.1.0/24 1.1.2.0/24"
|
|
cidr: ""
|
|
# -- VTEP CIDRs Mask that applies to all VTEP CIDRs, for example "255.255.255.0"
|
|
mask: ""
|
|
  # -- A space separated list of VTEP device MAC addresses (VTEP MAC), for example "x:x:x:x:x:x y:y:y:y:y:y"
|
|
mac: ""
|
|
|
|
# -- (string) Allows to explicitly specify the IPv4 CIDR for native routing.
|
|
# When specified, Cilium assumes networking for this CIDR is preconfigured and
|
|
# hands traffic destined for that range to the Linux network stack without
|
|
# applying any SNAT.
|
|
# Generally speaking, specifying a native routing CIDR implies that Cilium can
|
|
# depend on the underlying networking stack to route packets to their
|
|
# destination. To offer a concrete example, if Cilium is configured to use
|
|
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
|
|
# the user must configure the routes to reach pods, either manually or by
|
|
# setting the auto-direct-node-routes flag.
|
|
ipv4NativeRoutingCIDR: ""
|
|
|
|
# -- (string) Allows to explicitly specify the IPv6 CIDR for native routing.
|
|
# When specified, Cilium assumes networking for this CIDR is preconfigured and
|
|
# hands traffic destined for that range to the Linux network stack without
|
|
# applying any SNAT.
|
|
# Generally speaking, specifying a native routing CIDR implies that Cilium can
|
|
# depend on the underlying networking stack to route packets to their
|
|
# destination. To offer a concrete example, if Cilium is configured to use
|
|
# direct routing and the Kubernetes CIDR is included in the native routing CIDR,
|
|
# the user must configure the routes to reach pods, either manually or by
|
|
# setting the auto-direct-node-routes flag.
|
|
ipv6NativeRoutingCIDR: ""
|
|
|
|
# -- cilium-monitor sidecar.
|
|
monitor:
|
|
# -- Enable the cilium-monitor sidecar.
|
|
enabled: false
|
|
|
|
# -- Configure service load balancing
|
|
loadBalancer:
|
|
# -- standalone enables the standalone L4LB which does not connect to
|
|
# kube-apiserver.
|
|
# standalone: false
|
|
|
|
# -- algorithm is the name of the load balancing algorithm for backend
|
|
# selection e.g. random or maglev
|
|
# algorithm: random
|
|
|
|
# -- mode is the operation mode of load balancing for remote backends
|
|
# e.g. snat, dsr, hybrid
|
|
# mode: snat
|
|
|
|
# -- acceleration is the option to accelerate service handling via XDP
|
|
# e.g. native, disabled
|
|
# acceleration: disabled
|
|
|
|
# -- dsrDispatch configures whether IP option or IPIP encapsulation is
|
|
# used to pass a service IP and port to remote backend
|
|
# dsrDispatch: opt
|
|
|
|
# -- serviceTopology enables K8s Topology Aware Hints -based service
|
|
# endpoints filtering
|
|
# serviceTopology: false
|
|
|
|
# -- L7 LoadBalancer
|
|
l7:
|
|
# -- Enable L7 service load balancing via envoy proxy.
|
|
# The request to a k8s service, which has specific annotation e.g. service.cilium.io/lb-l7,
|
|
# will be forwarded to the local backend proxy to be load balanced to the service endpoints.
|
|
# Please refer to docs for supported annotations for more configuration.
|
|
#
|
|
# Applicable values:
|
|
# - envoy: Enable L7 load balancing via envoy proxy. This will automatically set enable-envoy-config as well.
|
|
# - disabled: Disable L7 load balancing by way of service annotation.
|
|
backend: disabled
|
|
# -- List of ports from service to be automatically redirected to above backend.
|
|
# Any service exposing one of these ports will be automatically redirected.
|
|
# Fine-grained control can be achieved by using the service annotation.
|
|
ports: []
|
|
# -- Default LB algorithm
|
|
# The default LB algorithm to be used for services, which can be overridden by the
|
|
# service annotation (e.g. service.cilium.io/lb-l7-algorithm)
|
|
# Applicable values: round_robin, least_request, random
|
|
algorithm: round_robin
|
|
|
|
# -- Configure N-S k8s service loadbalancing
|
|
nodePort:
|
|
# -- Enable the Cilium NodePort service implementation.
|
|
enabled: false
|
|
|
|
# -- Port range to use for NodePort services.
|
|
# range: "30000,32767"
|
|
|
|
# -- Set to true to prevent applications binding to service ports.
|
|
bindProtection: true
|
|
|
|
# -- Append NodePort range to ip_local_reserved_ports if clash with ephemeral
|
|
# ports is detected.
|
|
autoProtectPortRange: true
|
|
|
|
# -- Enable healthcheck nodePort server for NodePort services
|
|
enableHealthCheck: true
|
|
|
|
# -- Enable access of the healthcheck nodePort on the LoadBalancerIP. Needs
|
|
# EnableHealthCheck to be enabled
|
|
enableHealthCheckLoadBalancerIP: false
|
|
|
|
# policyAuditMode: false
|
|
|
|
# -- The agent can be put into one of the three policy enforcement modes:
|
|
# default, always and never.
|
|
# ref: https://docs.cilium.io/en/stable/security/policy/intro/#policy-enforcement-modes
|
|
policyEnforcementMode: "default"
|
|
|
|
# -- policyCIDRMatchMode is a list of entities that may be selected by CIDR selector.
|
|
# The only possible value is "nodes".
|
|
policyCIDRMatchMode:
|
|
|
|
pprof:
|
|
# -- Enable pprof for cilium-agent
|
|
enabled: false
|
|
# -- Configure pprof listen address for cilium-agent
|
|
address: localhost
|
|
# -- Configure pprof listen port for cilium-agent
|
|
port: 6060
|
|
|
|
# -- Configure prometheus metrics on the configured port at /metrics
|
|
prometheus:
|
|
enabled: false
|
|
port: 9962
|
|
serviceMonitor:
|
|
# -- Enable service monitors.
|
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
|
enabled: false
|
|
# -- Labels to add to ServiceMonitor cilium-agent
|
|
labels: {}
|
|
# -- Annotations to add to ServiceMonitor cilium-agent
|
|
annotations: {}
|
|
# -- jobLabel to add for ServiceMonitor cilium-agent
|
|
jobLabel: ""
|
|
# -- Interval for scrape metrics.
|
|
interval: "10s"
|
|
# -- Specify the Kubernetes namespace where Prometheus expects to find
|
|
# service monitors configured.
|
|
# namespace: ""
|
|
# -- Relabeling configs for the ServiceMonitor cilium-agent
|
|
relabelings:
|
|
- sourceLabels:
|
|
- __meta_kubernetes_pod_node_name
|
|
targetLabel: node
|
|
replacement: ${1}
|
|
# -- Metrics relabeling configs for the ServiceMonitor cilium-agent
|
|
metricRelabelings: ~
|
|
# -- Set to `true` and helm will not check for monitoring.coreos.com/v1 CRDs before deploying
|
|
trustCRDsExist: false
|
|
|
|
# -- Metrics that should be enabled or disabled from the default metric list.
|
|
# The list is expected to be separated by a space. (+metric_foo to enable
|
|
# metric_foo , -metric_bar to disable metric_bar).
|
|
# ref: https://docs.cilium.io/en/stable/observability/metrics/
|
|
metrics: ~
|
|
|
|
  # -- Enable controller group metrics for monitoring specific Cilium
|
|
# subsystems. The list is a list of controller group names. The special
|
|
# values of "all" and "none" are supported. The set of controller
|
|
# group names is not guaranteed to be stable between Cilium versions.
|
|
controllerGroupMetrics:
|
|
- write-cni-file
|
|
- sync-host-ips
|
|
- sync-lb-maps-with-k8s-services
|
|
|
|
# -- Grafana dashboards for cilium-agent
|
|
# grafana can import dashboards based on the label and value
|
|
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
|
|
dashboards:
|
|
enabled: false
|
|
label: grafana_dashboard
|
|
namespace: ~
|
|
labelValue: "1"
|
|
annotations: {}
|
|
|
|
# -- Configure Istio proxy options.
|
|
proxy:
|
|
|
|
prometheus:
|
|
# -- Deprecated in favor of envoy.prometheus.enabled
|
|
enabled: true
|
|
# -- Deprecated in favor of envoy.prometheus.port
|
|
port: ~
|
|
# -- Regular expression matching compatible Istio sidecar istio-proxy
|
|
# container image names
|
|
sidecarImageRegex: "cilium/istio_proxy"
|
|
|
|
# Configure Cilium Envoy options.
|
|
envoy:
|
|
# -- Enable Envoy Proxy in standalone DaemonSet.
|
|
enabled: false
|
|
|
|
log:
|
|
# -- The format string to use for laying out the log message metadata of Envoy.
|
|
format: "[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"
|
|
# -- Path to a separate Envoy log file, if any. Defaults to /dev/stdout.
|
|
path: ""
|
|
|
|
# -- Time in seconds after which a TCP connection attempt times out
|
|
connectTimeoutSeconds: 2
|
|
# -- ProxyMaxRequestsPerConnection specifies the max_requests_per_connection setting for Envoy
|
|
maxRequestsPerConnection: 0
|
|
# -- Set Envoy HTTP option max_connection_duration seconds. Default 0 (disable)
|
|
maxConnectionDurationSeconds: 0
|
|
# -- Set Envoy upstream HTTP idle connection timeout seconds.
|
|
# Does not apply to connections with pending requests. Default 60s
|
|
idleTimeoutDurationSeconds: 60
|
|
|
|
# -- Envoy container image.
|
|
image:
|
|
override: ~
|
|
repository: "${CILIUM_ENVOY_REPO}"
|
|
tag: "${CILIUM_ENVOY_VERSION}"
|
|
pullPolicy: "${PULL_POLICY}"
|
|
digest: "${CILIUM_ENVOY_DIGEST}"
|
|
useDigest: true
|
|
|
|
# -- Additional containers added to the cilium Envoy DaemonSet.
|
|
extraContainers: []
|
|
|
|
# -- Additional envoy container arguments.
|
|
extraArgs: []
|
|
|
|
# -- Additional envoy container environment variables.
|
|
extraEnv: []
|
|
|
|
# -- Additional envoy hostPath mounts.
|
|
extraHostPathMounts: []
|
|
# - name: host-mnt-data
|
|
# mountPath: /host/mnt/data
|
|
# hostPath: /mnt/data
|
|
# hostPathType: Directory
|
|
# readOnly: true
|
|
# mountPropagation: HostToContainer
|
|
|
|
# -- Additional envoy volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional envoy volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
# -- Configure termination grace period for cilium-envoy DaemonSet.
|
|
terminationGracePeriodSeconds: 1
|
|
|
|
# -- TCP port for the health API.
|
|
healthPort: 9878
|
|
|
|
# -- cilium-envoy update strategy
|
|
# ref: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/#updating-a-daemonset
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
rollingUpdate:
|
|
maxUnavailable: 2
|
|
# -- Roll out cilium envoy pods automatically when configmap is updated.
|
|
rollOutPods: false
|
|
|
|
# -- Annotations to be added to all top-level cilium-envoy objects (resources under templates/cilium-envoy)
|
|
annotations: {}
|
|
|
|
# -- Security Context for cilium-envoy pods.
|
|
podSecurityContext: {}
|
|
|
|
# -- Annotations to be added to envoy pods
|
|
podAnnotations: {}
|
|
|
|
# -- Labels to be added to envoy pods
|
|
podLabels: {}
|
|
|
|
# -- Envoy resource limits & requests
|
|
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
resources: {}
|
|
# limits:
|
|
# cpu: 4000m
|
|
# memory: 4Gi
|
|
# requests:
|
|
# cpu: 100m
|
|
# memory: 512Mi
|
|
|
|
startupProbe:
|
|
# -- failure threshold of startup probe.
|
|
# 105 x 2s translates to the old behaviour of the readiness probe (120s delay + 30 x 3s)
|
|
failureThreshold: 105
|
|
# -- interval between checks of the startup probe
|
|
periodSeconds: 2
|
|
livenessProbe:
|
|
# -- failure threshold of liveness probe
|
|
failureThreshold: 10
|
|
# -- interval between checks of the liveness probe
|
|
periodSeconds: 30
|
|
readinessProbe:
|
|
# -- failure threshold of readiness probe
|
|
failureThreshold: 3
|
|
# -- interval between checks of the readiness probe
|
|
periodSeconds: 30
|
|
|
|
securityContext:
|
|
# -- User to run the pod with
|
|
# runAsUser: 0
|
|
# -- Run the pod with elevated privileges
|
|
privileged: false
|
|
# -- SELinux options for the `cilium-envoy` container
|
|
seLinuxOptions:
|
|
level: 's0'
|
|
# Running with spc_t since we have removed the privileged mode.
|
|
# Users can change it to a different type as long as they have the
|
|
# type available on the system.
|
|
type: 'spc_t'
|
|
capabilities:
|
|
# -- Capabilities for the `cilium-envoy` container
|
|
envoy:
|
|
# Used since cilium proxy uses setting IPPROTO_IP/IP_TRANSPARENT
|
|
- NET_ADMIN
|
|
        # We need it for now but might not need it for >= 5.11 especially
|
|
# for the 'SYS_RESOURCE'.
|
|
        # In >= 5.8 there's already BPF and PERFMON capabilities
|
|
- SYS_ADMIN
|
|
# Both PERFMON and BPF requires kernel 5.8, container runtime
|
|
# cri-o >= v1.22.0 or containerd >= v1.5.0.
|
|
# If available, SYS_ADMIN can be removed.
|
|
#- PERFMON
|
|
#- BPF
|
|
|
|
# -- Affinity for cilium-envoy.
|
|
affinity:
|
|
podAntiAffinity:
|
|
requiredDuringSchedulingIgnoredDuringExecution:
|
|
- topologyKey: kubernetes.io/hostname
|
|
labelSelector:
|
|
matchLabels:
|
|
k8s-app: cilium-envoy
|
|
|
|
# -- Node selector for cilium-envoy.
|
|
nodeSelector:
|
|
kubernetes.io/os: linux
|
|
|
|
# -- Node tolerations for envoy scheduling to nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations:
|
|
- operator: Exists
|
|
# - key: "key"
|
|
# operator: "Equal|Exists"
|
|
# value: "value"
|
|
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
|
|
|
# -- The priority class to use for cilium-envoy.
|
|
priorityClassName: ~
|
|
|
|
# -- DNS policy for Cilium envoy pods.
|
|
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
|
dnsPolicy: ~
|
|
|
|
prometheus:
|
|
# -- Enable prometheus metrics for cilium-envoy
|
|
enabled: true
|
|
serviceMonitor:
|
|
# -- Enable service monitors.
|
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
|
enabled: false
|
|
# -- Labels to add to ServiceMonitor cilium-envoy
|
|
labels: {}
|
|
# -- Annotations to add to ServiceMonitor cilium-envoy
|
|
annotations: {}
|
|
# -- Interval for scrape metrics.
|
|
interval: "10s"
|
|
# -- Specify the Kubernetes namespace where Prometheus expects to find
|
|
# service monitors configured.
|
|
# namespace: ""
|
|
# -- Relabeling configs for the ServiceMonitor cilium-envoy
|
|
relabelings:
|
|
- sourceLabels:
|
|
- __meta_kubernetes_pod_node_name
|
|
targetLabel: node
|
|
replacement: ${1}
|
|
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
|
|
metricRelabelings: ~
|
|
# -- Serve prometheus metrics for cilium-envoy on the configured port
|
|
port: "9964"
|
|
|
|
# -- Enable use of the remote node identity.
|
|
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
|
|
remoteNodeIdentity: true
|
|
|
|
# -- Enable resource quotas for priority classes used in the cluster.
|
|
resourceQuotas:
|
|
enabled: false
|
|
cilium:
|
|
hard:
|
|
# 5k nodes * 2 DaemonSets (Cilium and cilium node init)
|
|
pods: "10k"
|
|
operator:
|
|
hard:
|
|
# 15 "clusterwide" Cilium Operator pods for HA
|
|
pods: "15"
|
|
|
|
# Need to document default
|
|
##################
|
|
#sessionAffinity: false
|
|
|
|
# -- Do not run Cilium agent when running with clean mode. Useful to completely
|
|
# uninstall Cilium as it will stop Cilium from starting and create artifacts
|
|
# in the node.
|
|
sleepAfterInit: false
|
|
|
|
# -- Enable check of service source ranges (currently, only for LoadBalancer).
|
|
svcSourceRangeCheck: true
|
|
|
|
# -- Synchronize Kubernetes nodes to kvstore and perform CNP GC.
|
|
synchronizeK8sNodes: true
|
|
|
|
# -- Configure TLS configuration in the agent.
|
|
tls:
|
|
  # -- This configures how the Cilium agent loads the secrets used by TLS-aware CiliumNetworkPolicies
|
|
# (namely the secrets referenced by terminatingTLS and originatingTLS).
|
|
# Possible values:
|
|
# - local
|
|
# - k8s
|
|
secretsBackend: local
|
|
|
|
# -- Base64 encoded PEM values for the CA certificate and private key.
|
|
# This can be used as common CA to generate certificates used by hubble and clustermesh components.
|
|
# It is neither required nor used when cert-manager is used to generate the certificates.
|
|
ca:
|
|
# -- Optional CA cert. If it is provided, it will be used by cilium to
|
|
# generate all other certificates. Otherwise, an ephemeral CA is generated.
|
|
cert: ""
|
|
|
|
# -- Optional CA private key. If it is provided, it will be used by cilium to
|
|
# generate all other certificates. Otherwise, an ephemeral CA is generated.
|
|
key: ""
|
|
|
|
# -- Generated certificates validity duration in days. This will be used for auto generated CA.
|
|
certValidityDuration: 1095
|
|
|
|
# -- Configure the CA trust bundle used for the validation of the certificates
|
|
# leveraged by hubble and clustermesh. When enabled, it overrides the content of the
|
|
# 'ca.crt' field of the respective certificates, allowing for CA rotation with no down-time.
|
|
caBundle:
|
|
# -- Enable the use of the CA trust bundle.
|
|
enabled: false
|
|
|
|
# -- Name of the ConfigMap containing the CA trust bundle.
|
|
name: cilium-root-ca.crt
|
|
|
|
# -- Entry of the ConfigMap containing the CA trust bundle.
|
|
key: ca.crt
|
|
|
|
# -- Use a Secret instead of a ConfigMap.
|
|
useSecret: false
|
|
|
|
# If uncommented, creates the ConfigMap and fills it with the specified content.
|
|
# Otherwise, the ConfigMap is assumed to be already present in .Release.Namespace.
|
|
#
|
|
# content: |
|
|
# -----BEGIN CERTIFICATE-----
|
|
# ...
|
|
# -----END CERTIFICATE-----
|
|
# -----BEGIN CERTIFICATE-----
|
|
# ...
|
|
# -----END CERTIFICATE-----
|
|
|
|
# -- Configure the encapsulation configuration for communication between nodes.
|
|
# Deprecated in favor of tunnelProtocol and routingMode. To be removed in 1.15.
|
|
# Possible values:
|
|
# - disabled
|
|
# - vxlan
|
|
# - geneve
|
|
# @default -- `"vxlan"`
|
|
tunnel: ""
|
|
|
|
# -- Tunneling protocol to use in tunneling mode and for ad-hoc tunnels.
|
|
# Possible values:
|
|
# - ""
|
|
# - vxlan
|
|
# - geneve
|
|
# @default -- `"vxlan"`
|
|
tunnelProtocol: ""
|
|
|
|
# -- Enable native-routing mode or tunneling mode.
|
|
# Possible values:
|
|
# - ""
|
|
# - native
|
|
# - tunnel
|
|
# @default -- `"tunnel"`
|
|
routingMode: ""
|
|
|
|
# -- Configure VXLAN and Geneve tunnel port.
|
|
# @default -- Port 8472 for VXLAN, Port 6081 for Geneve
|
|
tunnelPort: 0
|
|
|
|
# -- Configure the underlying network MTU to overwrite auto-detected MTU.
|
|
MTU: 0
|
|
|
|
# -- Disable the usage of CiliumEndpoint CRD.
|
|
disableEndpointCRD: false
|
|
|
|
wellKnownIdentities:
|
|
# -- Enable the use of well-known identities.
|
|
enabled: false
|
|
|
|
etcd:
|
|
# -- Enable etcd mode for the agent.
|
|
enabled: false
|
|
|
|
# -- cilium-etcd-operator image.
|
|
image:
|
|
override: ~
|
|
repository: "${CILIUM_ETCD_OPERATOR_REPO}"
|
|
tag: "${CILIUM_ETCD_OPERATOR_VERSION}"
|
|
digest: "${CILIUM_ETCD_OPERATOR_DIGEST}"
|
|
useDigest: true
|
|
pullPolicy: "${PULL_POLICY}"
|
|
|
|
# -- The priority class to use for cilium-etcd-operator
|
|
priorityClassName: ""
|
|
|
|
# -- Additional cilium-etcd-operator container arguments.
|
|
extraArgs: []
|
|
|
|
# -- Additional cilium-etcd-operator volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional cilium-etcd-operator volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
# -- Node tolerations for cilium-etcd-operator scheduling to nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations:
|
|
- operator: Exists
|
|
# - key: "key"
|
|
# operator: "Equal|Exists"
|
|
# value: "value"
|
|
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
|
|
|
# -- Pod topology spread constraints for cilium-etcd-operator
|
|
topologySpreadConstraints: []
|
|
# - maxSkew: 1
|
|
# topologyKey: topology.kubernetes.io/zone
|
|
# whenUnsatisfiable: DoNotSchedule
|
|
|
|
# -- Node labels for cilium-etcd-operator pod assignment
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
|
|
nodeSelector:
|
|
kubernetes.io/os: linux
|
|
|
|
# -- Annotations to be added to all top-level etcd-operator objects (resources under templates/etcd-operator)
|
|
annotations: {}
|
|
|
|
# -- Security context to be added to cilium-etcd-operator pods
|
|
podSecurityContext: {}
|
|
|
|
# -- Annotations to be added to cilium-etcd-operator pods
|
|
podAnnotations: {}
|
|
|
|
# -- Labels to be added to cilium-etcd-operator pods
|
|
podLabels: {}
|
|
|
|
# PodDisruptionBudget settings
|
|
podDisruptionBudget:
|
|
# -- enable PodDisruptionBudget
|
|
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
|
|
enabled: false
|
|
# -- Minimum number/percentage of pods that should remain scheduled.
|
|
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
|
|
minAvailable: null
|
|
# -- Maximum number/percentage of pods that may be made unavailable
|
|
maxUnavailable: 1
|
|
|
|
# -- cilium-etcd-operator resource limits & requests
|
|
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
resources: {}
|
|
# limits:
|
|
# cpu: 4000m
|
|
# memory: 4Gi
|
|
# requests:
|
|
# cpu: 100m
|
|
# memory: 512Mi
|
|
|
|
# -- Security context to be added to cilium-etcd-operator pods
|
|
securityContext: {}
|
|
# runAsUser: 0
|
|
|
|
# -- cilium-etcd-operator update strategy
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
rollingUpdate:
|
|
maxSurge: 1
|
|
maxUnavailable: 1
|
|
|
|
  # -- If etcd is behind a k8s service, set this option to true so that Cilium
|
|
# does the service translation automatically without requiring a DNS to be
|
|
# running.
|
|
k8sService: false
|
|
|
|
# -- Cluster domain for cilium-etcd-operator.
|
|
clusterDomain: cluster.local
|
|
|
|
# -- List of etcd endpoints (not needed when using managed=true).
|
|
endpoints:
|
|
- https://CHANGE-ME:2379
|
|
|
|
# -- Enable use of TLS/SSL for connectivity to etcd. (auto-enabled if
|
|
# managed=true)
|
|
ssl: false
|
|
|
|
operator:
|
|
# -- Enable the cilium-operator component (required).
|
|
enabled: true
|
|
|
|
# -- Roll out cilium-operator pods automatically when configmap is updated.
|
|
rollOutPods: false
|
|
|
|
# -- cilium-operator image.
|
|
image:
|
|
override: ~
|
|
repository: "${CILIUM_OPERATOR_BASE_REPO}"
|
|
tag: "${CILIUM_VERSION}"
|
|
# operator-generic-digest
|
|
genericDigest: ${OPERATOR_GENERIC_DIGEST}
|
|
# operator-azure-digest
|
|
azureDigest: ${OPERATOR_AZURE_DIGEST}
|
|
# operator-aws-digest
|
|
awsDigest: ${OPERATOR_AWS_DIGEST}
|
|
# operator-alibabacloud-digest
|
|
alibabacloudDigest: ${OPERATOR_ALIBABACLOUD_DIGEST}
|
|
useDigest: ${USE_DIGESTS}
|
|
pullPolicy: "${PULL_POLICY}"
|
|
suffix: "${CILIUM_OPERATOR_SUFFIX}"
|
|
|
|
# -- Number of replicas to run for the cilium-operator deployment
|
|
replicas: 2
|
|
|
|
# -- The priority class to use for cilium-operator
|
|
priorityClassName: ""
|
|
|
|
# -- DNS policy for Cilium operator pods.
|
|
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
|
|
dnsPolicy: ""
|
|
|
|
# -- cilium-operator update strategy
|
|
updateStrategy:
|
|
type: RollingUpdate
|
|
rollingUpdate:
|
|
maxSurge: 25%
|
|
maxUnavailable: 50%
|
|
|
|
# -- Affinity for cilium-operator
|
|
affinity:
|
|
podAntiAffinity:
|
|
requiredDuringSchedulingIgnoredDuringExecution:
|
|
- topologyKey: kubernetes.io/hostname
|
|
labelSelector:
|
|
matchLabels:
|
|
io.cilium/app: operator
|
|
|
|
# -- Pod topology spread constraints for cilium-operator
|
|
topologySpreadConstraints: []
|
|
# - maxSkew: 1
|
|
# topologyKey: topology.kubernetes.io/zone
|
|
# whenUnsatisfiable: DoNotSchedule
|
|
|
|
# -- Node labels for cilium-operator pod assignment
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
|
|
nodeSelector:
|
|
kubernetes.io/os: linux
|
|
|
|
# -- Node tolerations for cilium-operator scheduling to nodes with taints
|
|
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
|
|
tolerations:
|
|
- operator: Exists
|
|
# - key: "key"
|
|
# operator: "Equal|Exists"
|
|
# value: "value"
|
|
# effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
|
|
|
|
# -- Additional cilium-operator container arguments.
|
|
extraArgs: []
|
|
|
|
# -- Additional cilium-operator environment variables.
|
|
extraEnv: []
|
|
|
|
# -- Additional cilium-operator hostPath mounts.
|
|
extraHostPathMounts: []
|
|
# - name: host-mnt-data
|
|
# mountPath: /host/mnt/data
|
|
# hostPath: /mnt/data
|
|
# hostPathType: Directory
|
|
# readOnly: true
|
|
# mountPropagation: HostToContainer
|
|
|
|
# -- Additional cilium-operator volumes.
|
|
extraVolumes: []
|
|
|
|
# -- Additional cilium-operator volumeMounts.
|
|
extraVolumeMounts: []
|
|
|
|
# -- Annotations to be added to all top-level cilium-operator objects (resources under templates/cilium-operator)
|
|
annotations: {}
|
|
|
|
# -- Security context to be added to cilium-operator pods
|
|
podSecurityContext: {}
|
|
|
|
# -- Annotations to be added to cilium-operator pods
|
|
podAnnotations: {}
|
|
|
|
# -- Labels to be added to cilium-operator pods
|
|
podLabels: {}
|
|
|
|
# PodDisruptionBudget settings
|
|
podDisruptionBudget:
|
|
# -- enable PodDisruptionBudget
|
|
# ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
|
|
enabled: false
|
|
# -- Minimum number/percentage of pods that should remain scheduled.
|
|
# When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
|
|
minAvailable: null
|
|
# -- Maximum number/percentage of pods that may be made unavailable
|
|
maxUnavailable: 1
|
|
|
|
# -- cilium-operator resource limits & requests
|
|
# ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
|
resources: {}
|
|
# limits:
|
|
# cpu: 1000m
|
|
# memory: 1Gi
|
|
# requests:
|
|
# cpu: 100m
|
|
# memory: 128Mi
|
|
|
|
# -- Security context to be added to cilium-operator pods
|
|
securityContext: {}
|
|
# runAsUser: 0
|
|
|
|
# -- Interval for endpoint garbage collection.
|
|
endpointGCInterval: "5m0s"
|
|
|
|
# -- Interval for cilium node garbage collection.
|
|
nodeGCInterval: "5m0s"
|
|
|
|
# -- Skip CNP node status clean up at operator startup.
|
|
skipCNPStatusStartupClean: false
|
|
|
|
# -- Interval for identity garbage collection.
|
|
identityGCInterval: "15m0s"
|
|
|
|
# -- Timeout for identity heartbeats.
|
|
identityHeartbeatTimeout: "30m0s"
|
|
|
|
pprof:
|
|
# -- Enable pprof for cilium-operator
|
|
enabled: false
|
|
# -- Configure pprof listen address for cilium-operator
|
|
address: localhost
|
|
# -- Configure pprof listen port for cilium-operator
|
|
port: 6061
|
|
|
|
# -- Enable prometheus metrics for cilium-operator on the configured port at
|
|
# /metrics
|
|
prometheus:
|
|
enabled: true
|
|
port: 9963
|
|
serviceMonitor:
|
|
# -- Enable service monitors.
|
|
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
|
|
enabled: false
|
|
# -- Labels to add to ServiceMonitor cilium-operator
|
|
labels: {}
|
|
# -- Annotations to add to ServiceMonitor cilium-operator
|
|
annotations: {}
|
|
# -- jobLabel to add for ServiceMonitor cilium-operator
|
|
jobLabel: ""
|
|
# -- Interval for scrape metrics.
|
|
interval: "10s"
|
|
# -- Relabeling configs for the ServiceMonitor cilium-operator
|
|
relabelings: ~
|
|
# -- Metrics relabeling configs for the ServiceMonitor cilium-operator
|
|
metricRelabelings: ~
|
|
|
|
# -- Grafana dashboards for cilium-operator
|
|
# grafana can import dashboards based on the label and value
|
|
# ref: https://github.com/grafana/helm-charts/tree/main/charts/grafana#sidecar-for-dashboards
|
|
dashboards:
|
|
enabled: false
|
|
label: grafana_dashboard
|
|
namespace: ~
|
|
labelValue: "1"
|
|
annotations: {}
|
|
|
|
# -- Skip CRDs creation for cilium-operator
|
|
skipCRDCreation: false
|
|
|
|
# -- Remove Cilium node taint from Kubernetes nodes that have a healthy Cilium
|
|
# pod running.
|
|
removeNodeTaints: true
|
|
|
|
# -- Taint nodes where Cilium is scheduled but not running. This prevents pods
|
|
# from being scheduled to nodes where Cilium is not the default CNI provider.
|
|
# @default -- same as removeNodeTaints
|
|
setNodeTaints: ~
|
|
|
|
# -- Set Node condition NetworkUnavailable to 'false' with the reason
|
|
# 'CiliumIsUp' for nodes that have a healthy Cilium pod.
|
|
setNodeNetworkStatus: true
|
|
|
|
unmanagedPodWatcher:
|
|
# -- Restart any pod that are not managed by Cilium.
|
|
restart: true
|
|
# -- Interval, in seconds, to check if there are any pods that are not
|
|
# managed by Cilium.
|
|
intervalSeconds: 15
|
|
|
|
nodeinit:
  # -- Enable the node initialization DaemonSet
  enabled: false

  # -- node-init image.
  image:
    override: ~
    repository: "${CILIUM_NODEINIT_REPO}"
    tag: "${CILIUM_NODEINIT_VERSION}"
    pullPolicy: "${PULL_POLICY}"

  # -- The priority class to use for the nodeinit pod.
  priorityClassName: ""

  # -- node-init update strategy
  updateStrategy:
    type: RollingUpdate

  # -- Additional nodeinit environment variables.
  extraEnv: []

  # -- Additional nodeinit volumes.
  extraVolumes: []

  # -- Additional nodeinit volumeMounts.
  extraVolumeMounts: []

  # -- Affinity for cilium-nodeinit
  affinity: {}

  # -- Node labels for nodeinit pod assignment
  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
  nodeSelector:
    kubernetes.io/os: linux

  # -- Node tolerations for nodeinit scheduling to nodes with taints
  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
  tolerations:
    - operator: Exists
  # - key: "key"
  #   operator: "Equal|Exists"
  #   value: "value"
  #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

  # -- Annotations to be added to all top-level nodeinit objects (resources under templates/cilium-nodeinit)
  annotations: {}

  # -- Annotations to be added to node-init pods.
  podAnnotations: {}

  # -- Labels to be added to node-init pods.
  podLabels: {}

  # -- nodeinit resource limits & requests
  # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
  resources:
    requests:
      cpu: 100m
      memory: 100Mi

  # -- Security context to be added to nodeinit pods.
  securityContext:
    privileged: false
    seLinuxOptions:
      level: 's0'
      # Running with spc_t since we have removed the privileged mode.
      # Users can change it to a different type as long as they have the
      # type available on the system.
      type: 'spc_t'
    capabilities:
      add:
        # Used in iptables. Consider removing once we are iptables-free
        - SYS_MODULE
        # Used for nsenter
        - NET_ADMIN
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE

  # -- bootstrapFile is the location of the file where the bootstrap timestamp is
  # written by the node-init DaemonSet
  bootstrapFile: "/tmp/cilium-bootstrap.d/cilium-bootstrap-time"

  # -- startup offers way to customize startup nodeinit script (pre and post position)
  startup:
    preScript: ""
    postScript: ""
  # -- prestop offers way to customize prestop nodeinit script (pre and post position)
  prestop:
    preScript: ""
    postScript: ""

preflight:
  # -- Enable Cilium pre-flight resources (required for upgrade)
  enabled: false

  # -- Cilium pre-flight image.
  image:
    override: ~
    repository: "${CILIUM_REPO}"
    tag: "${CILIUM_VERSION}"
    # cilium-digest
    digest: ${CILIUM_DIGEST}
    useDigest: ${USE_DIGESTS}
    pullPolicy: "${PULL_POLICY}"

  # -- The priority class to use for the preflight pod.
  priorityClassName: ""

  # -- preflight update strategy
  updateStrategy:
    type: RollingUpdate

  # -- Additional preflight environment variables.
  extraEnv: []

  # -- Additional preflight volumes.
  extraVolumes: []

  # -- Additional preflight volumeMounts.
  extraVolumeMounts: []

  # -- Affinity for cilium-preflight
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              k8s-app: cilium

  # -- Node labels for preflight pod assignment
  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
  nodeSelector:
    kubernetes.io/os: linux

  # -- Node tolerations for preflight scheduling to nodes with taints
  # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
  tolerations:
    - key: node.kubernetes.io/not-ready
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      effect: NoSchedule
    - key: node-role.kubernetes.io/control-plane
      effect: NoSchedule
    - key: node.cloudprovider.kubernetes.io/uninitialized
      effect: NoSchedule
      value: "true"
    - key: CriticalAddonsOnly
      operator: "Exists"
  # - key: "key"
  #   operator: "Equal|Exists"
  #   value: "value"
  #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

  # -- Annotations to be added to all top-level preflight objects (resources under templates/cilium-preflight)
  annotations: {}

  # -- Security context to be added to preflight pods.
  podSecurityContext: {}

  # -- Annotations to be added to preflight pods
  podAnnotations: {}

  # -- Labels to be added to the preflight pod.
  podLabels: {}

  # PodDisruptionBudget settings
  podDisruptionBudget:
    # -- enable PodDisruptionBudget
    # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
    enabled: false
    # -- Minimum number/percentage of pods that should remain scheduled.
    # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
    minAvailable: null
    # -- Maximum number/percentage of pods that may be made unavailable
    maxUnavailable: 1

  # -- preflight resource limits & requests
  # ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
  resources: {}
  #   limits:
  #     cpu: 4000m
  #     memory: 4Gi
  #   requests:
  #     cpu: 100m
  #     memory: 512Mi

  # -- Security context to be added to preflight pods
  securityContext: {}
  #   runAsUser: 0

  # -- Path to write the `--tofqdns-pre-cache` file to.
  tofqdnsPreCache: ""

  # -- Configure termination grace period for preflight Deployment and DaemonSet.
  terminationGracePeriodSeconds: 1

  # -- By default we should always validate the installed CNPs before upgrading
  # Cilium. This will make sure the user will have the policies deployed in the
  # cluster with the right schema.
  validateCNPs: true

# -- Explicitly enable or disable priority class.
# .Capabilities.KubeVersion is unsettable in `helm template` calls,
# it depends on k8s libraries version that Helm was compiled against.
# This option allows to explicitly disable setting the priority class, which
# is useful for rendering charts for gke clusters in advance.
enableCriticalPriorityClass: true

# disableEnvoyVersionCheck removes the check for Envoy, which can be useful
# on AArch64 as the images do not currently ship a version of Envoy.
#disableEnvoyVersionCheck: false

clustermesh:
  # -- Deploy clustermesh-apiserver for clustermesh
  useAPIServer: false

  # -- Annotations to be added to all top-level clustermesh objects (resources under templates/clustermesh-apiserver and templates/clustermesh-config)
  annotations: {}

  # -- Clustermesh explicit configuration.
  config:
    # -- Enable the Clustermesh explicit configuration.
    enabled: false
    # -- Default dns domain for the Clustermesh API servers
    # This is used in the case cluster addresses are not provided
    # and IPs are used.
    domain: mesh.cilium.io
    # -- List of clusters to be peered in the mesh.
    clusters: []
    # clusters:
    # # -- Name of the cluster
    # - name: cluster1
    #   # -- Address of the cluster, use this if you created DNS records for
    #   # the cluster Clustermesh API server.
    #   address: cluster1.mesh.cilium.io
    #   # -- Port of the cluster Clustermesh API server.
    #   port: 2379
    #   # -- IPs of the cluster Clustermesh API server, use multiple ones when
    #   # you have multiple IPs to access the Clustermesh API server.
    #   ips:
    #   - 172.18.255.201
    #   # -- base64 encoded PEM values for the cluster client certificate, private key and certificate authority.
    #   # These fields can (and should) be omitted in case the CA is shared across clusters. In that case, the
    #   # "remote" private key and certificate available in the local cluster are automatically used instead.
    #   tls:
    #     cert: ""
    #     key: ""
    #     caCert: ""

  apiserver:
    # -- Clustermesh API server image.
    image:
      override: ~
      repository: "${CLUSTERMESH_APISERVER_REPO}"
      tag: "${CILIUM_VERSION}"
      # clustermesh-apiserver-digest
      digest: ${CLUSTERMESH_APISERVER_DIGEST}
      useDigest: ${USE_DIGESTS}
      pullPolicy: "${PULL_POLICY}"

    etcd:
      # -- Clustermesh API server etcd image.
      image:
        override: ~
        repository: "${ETCD_REPO}"
        tag: "${ETCD_VERSION}"
        digest: "${ETCD_DIGEST}"
        useDigest: true
        pullPolicy: "${PULL_POLICY}"

      # -- Specifies the resources for etcd container in the apiserver
      resources: {}
      #   requests:
      #     cpu: 200m
      #     memory: 256Mi
      #   limits:
      #     cpu: 1000m
      #     memory: 256Mi

      # -- Security context to be added to clustermesh-apiserver etcd containers
      securityContext: {}

      # -- lifecycle setting for the etcd container
      lifecycle: {}

      init:
        # -- Specifies the resources for etcd init container in the apiserver
        resources: {}
        #   requests:
        #     cpu: 100m
        #     memory: 100Mi
        #   limits:
        #     cpu: 100m
        #     memory: 100Mi

    kvstoremesh:
      # -- Enable KVStoreMesh. KVStoreMesh caches the information retrieved
      # from the remote clusters in the local etcd instance.
      enabled: false

      # -- KVStoreMesh image.
      image:
        override: ~
        repository: "${KVSTOREMESH_REPO}"
        tag: "${CILIUM_VERSION}"
        # kvstoremesh-digest
        digest: ${KVSTOREMESH_DIGEST}
        useDigest: ${USE_DIGESTS}
        pullPolicy: "${PULL_POLICY}"

      # -- Additional KVStoreMesh arguments.
      extraArgs: []

      # -- Additional KVStoreMesh environment variables.
      extraEnv: []

      # -- Resource requests and limits for the KVStoreMesh container
      resources: {}
      #   requests:
      #     cpu: 100m
      #     memory: 64Mi
      #   limits:
      #     cpu: 1000m
      #     memory: 1024M

      # -- Additional KVStoreMesh volumeMounts.
      extraVolumeMounts: []

      # -- KVStoreMesh Security context
      securityContext:
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL

      # -- lifecycle setting for the KVStoreMesh container
      lifecycle: {}

    service:
      # -- The type of service used for apiserver access.
      type: NodePort
      # -- Optional port to use as the node port for apiserver access.
      #
      # WARNING: make sure to configure a different NodePort in each cluster if
      # kube-proxy replacement is enabled, as Cilium is currently affected by a known
      # bug (#24692) when NodePorts are handled by the KPR implementation. If a service
      # with the same NodePort exists both in the local and the remote cluster, all
      # traffic originating from inside the cluster and targeting the corresponding
      # NodePort will be redirected to a local backend, regardless of whether the
      # destination node belongs to the local or the remote cluster.
      nodePort: 32379
      # -- Optional loadBalancer IP address to use with type LoadBalancer.
      # loadBalancerIP:

      # -- Annotations for the clustermesh-apiserver
      # For GKE LoadBalancer, use annotation cloud.google.com/load-balancer-type: "Internal"
      # For EKS LoadBalancer, use annotation service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
      annotations: {}

      # -- The externalTrafficPolicy of service used for apiserver access.
      externalTrafficPolicy:

      # -- The internalTrafficPolicy of service used for apiserver access.
      internalTrafficPolicy:

    # -- Number of replicas run for the clustermesh-apiserver deployment.
    replicas: 1

    # -- lifecycle setting for the apiserver container
    lifecycle: {}

    # -- terminationGracePeriodSeconds for the clustermesh-apiserver deployment
    terminationGracePeriodSeconds: 30

    # -- Additional clustermesh-apiserver arguments.
    extraArgs: []

    # -- Additional clustermesh-apiserver environment variables.
    extraEnv: []

    # -- Additional clustermesh-apiserver volumes.
    extraVolumes: []

    # -- Additional clustermesh-apiserver volumeMounts.
    extraVolumeMounts: []

    # -- Security context to be added to clustermesh-apiserver containers
    securityContext: {}

    # -- Security context to be added to clustermesh-apiserver pods
    podSecurityContext: {}

    # -- Annotations to be added to clustermesh-apiserver pods
    podAnnotations: {}

    # -- Labels to be added to clustermesh-apiserver pods
    podLabels: {}

    # PodDisruptionBudget settings
    podDisruptionBudget:
      # -- enable PodDisruptionBudget
      # ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
      enabled: false
      # -- Minimum number/percentage of pods that should remain scheduled.
      # When it's set, maxUnavailable must be disabled by `maxUnavailable: null`
      minAvailable: null
      # -- Maximum number/percentage of pods that may be made unavailable
      maxUnavailable: 1

    # -- Resource requests and limits for the clustermesh-apiserver container of the clustermesh-apiserver deployment, such as
    #     resources:
    #       limits:
    #         cpu: 1000m
    #         memory: 1024M
    #       requests:
    #         cpu: 100m
    #         memory: 64Mi
    # -- Resource requests and limits for the clustermesh-apiserver
    resources: {}
    #   requests:
    #     cpu: 100m
    #     memory: 64Mi
    #   limits:
    #     cpu: 1000m
    #     memory: 1024M

    # -- Affinity for clustermesh.apiserver
    affinity:
      podAntiAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          - topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                k8s-app: clustermesh-apiserver

    # -- Pod topology spread constraints for clustermesh-apiserver
    topologySpreadConstraints: []
    #   - maxSkew: 1
    #     topologyKey: topology.kubernetes.io/zone
    #     whenUnsatisfiable: DoNotSchedule

    # -- Node labels for pod assignment
    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
    nodeSelector:
      kubernetes.io/os: linux

    # -- Node tolerations for pod assignment on nodes with taints
    # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
    tolerations: []

    # -- clustermesh-apiserver update strategy
    updateStrategy:
      type: RollingUpdate
      rollingUpdate:
        maxUnavailable: 1

    # -- The priority class to use for clustermesh-apiserver
    priorityClassName: ""

    tls:
      # -- Configure the clustermesh authentication mode.
      # Supported values:
      # - legacy: All clusters access remote clustermesh instances with the same
      #           username (i.e., remote). The "remote" certificate must be
      #           generated with CN=remote if provided manually.
      # - migration: Intermediate mode required to upgrade from legacy to cluster
      #              (and vice versa) with no disruption. Specifically, it enables
      #              the creation of the per-cluster usernames, while still using
      #              the common one for authentication. The "remote" certificate must
      #              be generated with CN=remote if provided manually (same as legacy).
      # - cluster: Each cluster accesses remote etcd instances with a username
      #            depending on the local cluster name (i.e., remote-<cluster-name>).
      #            The "remote" certificate must be generated with CN=remote-<cluster-name>
      #            if provided manually. Cluster mode is meaningful only when the same
      #            CA is shared across all clusters part of the mesh.
      authMode: legacy

      # -- Configure automatic TLS certificates generation.
      # A Kubernetes CronJob is used the generate any
      # certificates not provided by the user at installation
      # time.
      auto:
        # -- When set to true, automatically generate a CA and certificates to
        # enable mTLS between clustermesh-apiserver and external workload instances.
        # If set to false, the certs to be provided by setting appropriate values below.
        enabled: true
        # Sets the method to auto-generate certificates. Supported values:
        # - helm: This method uses Helm to generate all certificates.
        # - cronJob: This method uses a Kubernetes CronJob the generate any
        #            certificates not provided by the user at installation
        #            time.
        # - certmanager: This method use cert-manager to generate & rotate certificates.
        method: helm
        # -- Generated certificates validity duration in days.
        certValidityDuration: 1095
        # -- Schedule for certificates regeneration (regardless of their expiration date).
        # Only used if method is "cronJob". If nil, then no recurring job will be created.
        # Instead, only the one-shot job is deployed to generate the certificates at
        # installation time.
        #
        # Due to the out-of-band distribution of client certs to external workloads the
        # CA is (re)regenerated only if it is not provided as a helm value and the k8s
        # secret is manually deleted.
        #
        # Defaults to none. Commented syntax gives midnight of the first day of every
        # fourth month. For syntax, see
        # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#schedule-syntax
        # schedule: "0 0 1 */4 *"

        # [Example]
        # certManagerIssuerRef:
        #   group: cert-manager.io
        #   kind: ClusterIssuer
        #   name: ca-issuer
        # -- certmanager issuer used when clustermesh.apiserver.tls.auto.method=certmanager.
        certManagerIssuerRef: {}
      # -- base64 encoded PEM values for the clustermesh-apiserver server certificate and private key.
      # Used if 'auto' is not enabled.
      server:
        cert: ""
        key: ""
        # -- Extra DNS names added to certificate when it's auto generated
        extraDnsNames: []
        # -- Extra IP addresses added to certificate when it's auto generated
        extraIpAddresses: []
      # -- base64 encoded PEM values for the clustermesh-apiserver admin certificate and private key.
      # Used if 'auto' is not enabled.
      admin:
        cert: ""
        key: ""
      # -- base64 encoded PEM values for the clustermesh-apiserver client certificate and private key.
      # Used if 'auto' is not enabled.
      client:
        cert: ""
        key: ""
      # -- base64 encoded PEM values for the clustermesh-apiserver remote cluster certificate and private key.
      # Used if 'auto' is not enabled.
      remote:
        cert: ""
        key: ""

    # clustermesh-apiserver Prometheus metrics configuration
    metrics:
      # -- Enables exporting apiserver metrics in OpenMetrics format.
      enabled: true
      # -- Configure the port the apiserver metric server listens on.
      port: 9962

      kvstoremesh:
        # -- Enables exporting KVStoreMesh metrics in OpenMetrics format.
        enabled: true
        # -- Configure the port the KVStoreMesh metric server listens on.
        port: 9964

      etcd:
        # -- Enables exporting etcd metrics in OpenMetrics format.
        enabled: true
        # -- Set level of detail for etcd metrics; specify 'extensive' to include server side gRPC histogram metrics.
        mode: basic
        # -- Configure the port the etcd metric server listens on.
        port: 9963

      serviceMonitor:
        # -- Enable service monitor.
        # This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
        enabled: false
        # -- Labels to add to ServiceMonitor clustermesh-apiserver
        labels: {}
        # -- Annotations to add to ServiceMonitor clustermesh-apiserver
        annotations: {}
        # -- Specify the Kubernetes namespace where Prometheus expects to find
        # service monitors configured.
        # namespace: ""

        # -- Interval for scrape metrics (apiserver metrics)
        interval: "10s"
        # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
        relabelings: ~
        # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (apiserver metrics)
        metricRelabelings: ~

        kvstoremesh:
          # -- Interval for scrape metrics (KVStoreMesh metrics)
          interval: "10s"
          # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
          relabelings: ~
          # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (KVStoreMesh metrics)
          metricRelabelings: ~

        etcd:
          # -- Interval for scrape metrics (etcd metrics)
          interval: "10s"
          # -- Relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
          relabelings: ~
          # -- Metrics relabeling configs for the ServiceMonitor clustermesh-apiserver (etcd metrics)
          metricRelabelings: ~

# -- Configure external workloads support
externalWorkloads:
  # -- Enable support for external workloads, such as VMs (false by default).
  enabled: false

# -- Configure cgroup related configuration
cgroup:
  autoMount:
    # -- Enable auto mount of cgroup2 filesystem.
    # When `autoMount` is enabled, cgroup2 filesystem is mounted at
    # `cgroup.hostRoot` path on the underlying host and inside the cilium agent pod.
    # If users disable `autoMount`, it's expected that users have mounted
    # cgroup2 filesystem at the specified `cgroup.hostRoot` volume, and then the
    # volume will be mounted inside the cilium agent pod at the same path.
    enabled: true
    # -- Init Container Cgroup Automount resource limits & requests
    resources: {}
    #   limits:
    #     cpu: 100m
    #     memory: 128Mi
    #   requests:
    #     cpu: 100m
    #     memory: 128Mi
  # -- Configure cgroup root where cgroup2 filesystem is mounted on the host (see also: `cgroup.autoMount`)
  hostRoot: /run/cilium/cgroupv2

# -- Configure whether to enable auto detect of terminating state for endpoints
# in order to support graceful termination.
enableK8sTerminatingEndpoint: true

# -- Configure whether to unload DNS policy rules on graceful shutdown
# dnsPolicyUnloadOnShutdown: false

# -- Configure the key of the taint indicating that Cilium is not ready on the node.
# When set to a value starting with `ignore-taint.cluster-autoscaler.kubernetes.io/`, the Cluster Autoscaler will ignore the taint on its decisions, allowing the cluster to scale up.
agentNotReadyTaintKey: "node.cilium.io/agent-not-ready"

dnsProxy:
  # -- DNS response code for rejecting DNS requests, available options are '[nameError refused]'.
  dnsRejectResponseCode: refused
  # -- Allow the DNS proxy to compress responses to endpoints that are larger than 512 Bytes or the EDNS0 option, if present.
  enableDnsCompression: true
  # -- Maximum number of IPs to maintain per FQDN name for each endpoint.
  endpointMaxIpPerHostname: 50
  # -- Time during which idle but previously active connections with expired DNS lookups are still considered alive.
  idleConnectionGracePeriod: 0s
  # -- Maximum number of IPs to retain for expired DNS lookups with still-active connections.
  maxDeferredConnectionDeletes: 10000
  # -- The minimum time, in seconds, to use DNS data for toFQDNs policies. If
  # the upstream DNS server returns a DNS record with a shorter TTL, Cilium
  # overwrites the TTL with this value. Setting this value to zero means that
  # Cilium will honor the TTLs returned by the upstream DNS server.
  minTtl: 0
  # -- DNS cache data at this path is preloaded on agent startup.
  preCache: ""
  # -- Global port on which the in-agent DNS proxy should listen. Default 0 is a OS-assigned port.
  proxyPort: 0
  # -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
  proxyResponseMaxDelay: 100ms

# -- SCTP Configuration Values
sctp:
  # -- Enable SCTP support. NOTE: Currently, SCTP support does not support rewriting ports or multihoming.
  enabled: false

# Configuration for types of authentication for Cilium (beta)
authentication:
  # -- Enable authentication processing and garbage collection.
  # Note that if disabled, policy enforcement will still block requests that require authentication.
  # But the resulting authentication requests for these requests will not be processed, therefore the requests not be allowed.
  enabled: true
  # -- Buffer size of the channel Cilium uses to receive authentication events from the signal map.
  queueSize: 1024
  # -- Buffer size of the channel Cilium uses to receive certificate expiration events from auth handlers.
  rotatedIdentitiesQueueSize: 1024
  # -- Interval for garbage collection of auth map entries.
  gcInterval: "5m0s"
  # Configuration for Cilium's service-to-service mutual authentication using TLS handshakes.
  # Note that this is not full mTLS support without also enabling encryption of some form.
  # Current encryption options are Wireguard or IPSec, configured in encryption block above.
  mutual:
    # -- Port on the agent where mutual authentication handshakes between agents will be performed
    port: 4250
    # -- Timeout for connecting to the remote node TCP socket
    connectTimeout: 5s
    # Settings for SPIRE
    spire:
      # -- Enable SPIRE integration (beta)
      enabled: false
      # -- Annotations to be added to all top-level spire objects (resources under templates/spire)
      annotations: {}
      # Settings to control the SPIRE installation and configuration
      install:
        # -- Enable SPIRE installation.
        # This will only take effect only if authentication.mutual.spire.enabled is true
        enabled: true
        # -- SPIRE namespace to install into
        namespace: cilium-spire
        # -- init container image of SPIRE agent and server
        initImage:
          override: ~
          repository: "${SPIRE_INIT_REPO}"
          tag: "${SPIRE_INIT_VERSION}"
          digest: "${SPIRE_INIT_DIGEST}"
          useDigest: true
          pullPolicy: "${PULL_POLICY}"
        # SPIRE agent configuration
        agent:
          # -- SPIRE agent image
          image:
            override: ~
            repository: "${SPIRE_AGENT_REPO}"
            tag: "${SPIRE_AGENT_VERSION}"
            digest: "${SPIRE_AGENT_DIGEST}"
            useDigest: true
            pullPolicy: "${PULL_POLICY}"
          # -- SPIRE agent service account
          serviceAccount:
            create: true
            name: spire-agent
          # -- SPIRE agent annotations
          annotations: {}
          # -- SPIRE agent labels
          labels: {}
          # -- SPIRE Workload Attestor kubelet verification.
          skipKubeletVerification: true
          # -- SPIRE agent tolerations configuration
          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
          tolerations: []
        server:
          # -- SPIRE server image
          image:
            override: ~
            repository: "${SPIRE_SERVER_REPO}"
            tag: "${SPIRE_SERVER_VERSION}"
            digest: "${SPIRE_SERVER_DIGEST}"
            useDigest: true
            pullPolicy: "${PULL_POLICY}"
          # -- SPIRE server service account
          serviceAccount:
            create: true
            name: spire-server
          # -- SPIRE server init containers
          initContainers: []
          # -- SPIRE server annotations
          annotations: {}
          # -- SPIRE server labels
          labels: {}
          # SPIRE server service configuration
          service:
            # -- Service type for the SPIRE server service
            type: ClusterIP
            # -- Annotations to be added to the SPIRE server service
            annotations: {}
            # -- Labels to be added to the SPIRE server service
            labels: {}
          # -- SPIRE server affinity configuration
          affinity: {}
          # -- SPIRE server nodeSelector configuration
          # ref: ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
          nodeSelector: {}
          # -- SPIRE server tolerations configuration
          # ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
          tolerations: []
          # SPIRE server datastorage configuration
          dataStorage:
            # -- Enable SPIRE server data storage
            enabled: true
            # -- Size of the SPIRE server data storage
            size: 1Gi
            # -- Access mode of the SPIRE server data storage
            accessMode: ReadWriteOnce
            # -- StorageClass of the SPIRE server data storage
            storageClass: null
          # -- Security context to be added to spire server pods.
          # SecurityContext holds pod-level security attributes and common container settings.
          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
          podSecurityContext: {}
          # -- Security context to be added to spire server containers.
          # SecurityContext holds pod-level security attributes and common container settings.
          # ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
          securityContext: {}
        # SPIRE CA configuration
        ca:
          # -- SPIRE CA key type
          # AWS requires the use of RSA. EC cryptography is not supported
          keyType: "rsa-4096"
          # -- SPIRE CA Subject
          subject:
            country: "US"
            organization: "SPIRE"
            commonName: "Cilium SPIRE CA"
      # -- SPIRE server address used by Cilium Operator
      #
      # If k8s Service DNS along with port number is used (e.g. <service-name>.<namespace>.svc(.*):<port-number> format),
      # Cilium Operator will resolve its address by looking up the clusterIP from Service resource.
      #
      # Example values: 10.0.0.1:8081, spire-server.cilium-spire.svc:8081
      serverAddress: ~
      # -- SPIFFE trust domain to use for fetching certificates
      trustDomain: spiffe.cilium
      # -- SPIRE socket path where the SPIRE delegated api agent is listening
      adminSocketPath: /run/spire/sockets/admin.sock
      # -- SPIRE socket path where the SPIRE workload agent is listening.
      # Applies to both the Cilium Agent and Operator
      agentSocketPath: /run/spire/sockets/agent/agent.sock
      # -- SPIRE connection timeout
      connectionTimeout: 30s