helm: bump Cilium to v1.15.0-edg.1

Markus Rudy 2024-05-29 16:21:56 +02:00
parent ae5323ae0a
commit 9405b8abeb
21 changed files with 215 additions and 59 deletions

View File

@ -2,10 +2,10 @@ apiVersion: v2
name: cilium
displayName: Cilium
home: https://cilium.io/
version: 1.15.0-pre.3-edg.3
appVersion: 1.15.0-pre.3-edg.3
version: 1.15.0-edg.1
appVersion: 1.15.0-edg.1
kubeVersion: ">= 1.16.0-0"
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.15/Documentation/images/logo-solo.svg
description: eBPF-based Networking, Security, and Observability
keywords:
- BPF

View File

@ -1,6 +1,6 @@
# cilium
![Version: 1.15.0-pre.3](https://img.shields.io/badge/Version-1.15.0--pre.3-informational?style=flat-square) ![AppVersion: 1.15.0-pre.3](https://img.shields.io/badge/AppVersion-1.15.0--pre.3-informational?style=flat-square)
![Version: 1.15.0](https://img.shields.io/badge/Version-1.15.0-informational?style=flat-square) ![AppVersion: 1.15.0](https://img.shields.io/badge/AppVersion-1.15.0-informational?style=flat-square)
Cilium is open source software for providing and transparently securing
network connectivity and loadbalancing between application workloads such as
@ -73,15 +73,16 @@ contributors across the globe, there is almost always someone available to help.
| authentication.mutual.spire.enabled | bool | `false` | Enable SPIRE integration (beta) |
| authentication.mutual.spire.install.agent.affinity | object | `{}` | SPIRE agent affinity configuration |
| authentication.mutual.spire.install.agent.annotations | object | `{}` | SPIRE agent annotations |
| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:d489bc8470d7a0f292e0e3576c3e7025253343dc798241bcfd9061828e2a6bef","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.8.4","useDigest":true}` | SPIRE agent image |
| authentication.mutual.spire.install.agent.image | object | `{"digest":"sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-agent","tag":"1.8.5","useDigest":true}` | SPIRE agent image |
| authentication.mutual.spire.install.agent.labels | object | `{}` | SPIRE agent labels |
| authentication.mutual.spire.install.agent.nodeSelector | object | `{}` | SPIRE agent nodeSelector configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| authentication.mutual.spire.install.agent.podSecurityContext | object | `{}` | Security context to be added to spire agent pods. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod |
| authentication.mutual.spire.install.agent.securityContext | object | `{}` | Security context to be added to spire agent containers. SecurityContext holds pod-level security attributes and common container settings. ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container |
| authentication.mutual.spire.install.agent.serviceAccount | object | `{"create":true,"name":"spire-agent"}` | SPIRE agent service account |
| authentication.mutual.spire.install.agent.skipKubeletVerification | bool | `true` | SPIRE Workload Attestor kubelet verification. |
| authentication.mutual.spire.install.agent.tolerations | list | `[]` | SPIRE agent tolerations configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.agent.tolerations | list | `[{"effect":"NoSchedule","key":"node.kubernetes.io/not-ready"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/master"},{"effect":"NoSchedule","key":"node-role.kubernetes.io/control-plane"},{"effect":"NoSchedule","key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true"},{"key":"CriticalAddonsOnly","operator":"Exists"}]` | SPIRE agent tolerations configuration By default it follows the same tolerations as the agent itself to allow the Cilium agent on this node to connect to SPIRE. ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ |
| authentication.mutual.spire.install.enabled | bool | `true` | Enable SPIRE installation. This will only take effect if authentication.mutual.spire.enabled is true |
| authentication.mutual.spire.install.existingNamespace | bool | `false` | SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace. |
| authentication.mutual.spire.install.initImage | object | `{"digest":"sha256:223ae047b1065bd069aac01ae3ac8088b3ca4a527827e283b85112f29385fb1b","override":null,"pullPolicy":"IfNotPresent","repository":"docker.io/library/busybox","tag":"1.36.1","useDigest":true}` | init container image of SPIRE agent and server |
| authentication.mutual.spire.install.namespace | string | `"cilium-spire"` | SPIRE namespace to install into |
| authentication.mutual.spire.install.server.affinity | object | `{}` | SPIRE server affinity configuration |
@ -92,7 +93,7 @@ contributors across the globe, there is almost always someone available to help.
| authentication.mutual.spire.install.server.dataStorage.enabled | bool | `true` | Enable SPIRE server data storage |
| authentication.mutual.spire.install.server.dataStorage.size | string | `"1Gi"` | Size of the SPIRE server data storage |
| authentication.mutual.spire.install.server.dataStorage.storageClass | string | `nil` | StorageClass of the SPIRE server data storage |
| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:bf79e0a921f8b8aa92602f7ea335616e72f7e91f939848e7ccc52d5bddfe96a1","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.8.4","useDigest":true}` | SPIRE server image |
| authentication.mutual.spire.install.server.image | object | `{"digest":"sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428","override":null,"pullPolicy":"IfNotPresent","repository":"ghcr.io/spiffe/spire-server","tag":"1.8.5","useDigest":true}` | SPIRE server image |
| authentication.mutual.spire.install.server.initContainers | list | `[]` | SPIRE server init containers |
| authentication.mutual.spire.install.server.labels | object | `{}` | SPIRE server labels |
| authentication.mutual.spire.install.server.nodeSelector | object | `{}` | SPIRE server nodeSelector configuration ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@ -169,7 +170,7 @@ contributors across the globe, there is almost always someone available to help.
| clustermesh.apiserver.extraEnv | list | `[]` | Additional clustermesh-apiserver environment variables. |
| clustermesh.apiserver.extraVolumeMounts | list | `[]` | Additional clustermesh-apiserver volumeMounts. |
| clustermesh.apiserver.extraVolumes | list | `[]` | Additional clustermesh-apiserver volumes. |
| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.15.0-pre.3","useDigest":false}` | Clustermesh API server image. |
| clustermesh.apiserver.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/clustermesh-apiserver","tag":"v1.15.0","useDigest":false}` | Clustermesh API server image. |
| clustermesh.apiserver.kvstoremesh.enabled | bool | `false` | Enable KVStoreMesh. KVStoreMesh caches the information retrieved from the remote clusters in the local etcd instance. |
| clustermesh.apiserver.kvstoremesh.extraArgs | list | `[]` | Additional KVStoreMesh arguments. |
| clustermesh.apiserver.kvstoremesh.extraEnv | list | `[]` | Additional KVStoreMesh environment variables. |
@ -333,7 +334,7 @@ contributors across the globe, there is almost always someone available to help.
| envoy.extraVolumes | list | `[]` | Additional envoy volumes. |
| envoy.healthPort | int | `9878` | TCP port for the health API. |
| envoy.idleTimeoutDurationSeconds | int | `60` | Set Envoy upstream HTTP idle connection timeout seconds. Does not apply to connections with pending requests. Default 60s |
| envoy.image | object | `{"digest":"sha256:80de27c1d16ab92923cc0cd1fff90f2e7047a9abf3906fda712268d9cbc5b950","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.27.2-f19708f3d0188fe39b7e024b4525b75a9eeee61f","useDigest":true}` | Envoy container image. |
| envoy.image | object | `{"digest":"sha256:bf37c46d3d6bd5f51ff11d09de81671ced070e27912e080083c58a6d3fbb740f","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium-envoy","tag":"v1.27.2-13f6142b9c02268b10d547c8b093ef16724538e3","useDigest":true}` | Envoy container image. |
| envoy.livenessProbe.failureThreshold | int | `10` | failure threshold of liveness probe |
| envoy.livenessProbe.periodSeconds | int | `30` | interval between checks of the liveness probe |
| envoy.log.format | string | `"[%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v"` | The format string to use for laying out the log message metadata of Envoy. |
@ -345,14 +346,15 @@ contributors across the globe, there is almost always someone available to help.
| envoy.podLabels | object | `{}` | Labels to be added to envoy pods |
| envoy.podSecurityContext | object | `{}` | Security Context for cilium-envoy pods. |
| envoy.priorityClassName | string | `nil` | The priority class to use for cilium-envoy. |
| envoy.prometheus | object | `{"enabled":true,"port":"9964","serviceMonitor":{"annotations":{},"enabled":false,"interval":"10s","labels":{},"metricRelabelings":null,"relabelings":[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]}}` | Configure Cilium Envoy Prometheus options. Note that some of these apply to either cilium-agent or cilium-envoy. |
| envoy.prometheus.enabled | bool | `true` | Enable prometheus metrics for cilium-envoy |
| envoy.prometheus.port | string | `"9964"` | Serve prometheus metrics for cilium-envoy on the configured port |
| envoy.prometheus.serviceMonitor.annotations | object | `{}` | Annotations to add to ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) |
| envoy.prometheus.serviceMonitor.enabled | bool | `false` | Enable service monitors. This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml) Note that this setting applies to both cilium-envoy _and_ cilium-agent with Envoy enabled. |
| envoy.prometheus.serviceMonitor.interval | string | `"10s"` | Interval for scrape metrics. |
| envoy.prometheus.serviceMonitor.labels | object | `{}` | Labels to add to ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy |
| envoy.prometheus.serviceMonitor.metricRelabelings | string | `nil` | Metrics relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
| envoy.prometheus.serviceMonitor.relabelings | list | `[{"replacement":"${1}","sourceLabels":["__meta_kubernetes_pod_node_name"],"targetLabel":"node"}]` | Relabeling configs for the ServiceMonitor cilium-envoy or for cilium-agent with Envoy configured. |
| envoy.readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe |
| envoy.readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
| envoy.resources | object | `{}` | Envoy resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
@ -453,9 +455,11 @@ contributors across the globe, there is almost always someone available to help.
| hubble.relay.dialTimeout | string | `nil` | Dial timeout to connect to the local hubble instance to receive peer information (e.g. "30s"). |
| hubble.relay.enabled | bool | `false` | Enable Hubble Relay (requires hubble.enabled=true) |
| hubble.relay.extraEnv | list | `[]` | Additional hubble-relay environment variables. |
| hubble.relay.extraVolumeMounts | list | `[]` | Additional hubble-relay volumeMounts. |
| hubble.relay.extraVolumes | list | `[]` | Additional hubble-relay volumes. |
| hubble.relay.gops.enabled | bool | `true` | Enable gops for hubble-relay |
| hubble.relay.gops.port | int | `9893` | Configure gops listen port for hubble-relay |
| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.15.0-pre.3","useDigest":false}` | Hubble-relay container image. |
| hubble.relay.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-relay","tag":"v1.15.0","useDigest":false}` | Hubble-relay container image. |
| hubble.relay.listenHost | string | `""` | Host to listen to. Specify an empty string to bind to all the interfaces. |
| hubble.relay.listenPort | string | `"4245"` | Port to listen to. |
| hubble.relay.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
@ -513,7 +517,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.backend.extraEnv | list | `[]` | Additional hubble-ui backend environment variables. |
| hubble.ui.backend.extraVolumeMounts | list | `[]` | Additional hubble-ui backend volumeMounts. |
| hubble.ui.backend.extraVolumes | list | `[]` | Additional hubble-ui backend volumes. |
| hubble.ui.backend.image | object | `{"digest":"sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.12.1","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.image | object | `{"digest":"sha256:1cd84251cec46e20f9e839ee0afba9b51c8de59d35681234f701d7f42062f138","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui-backend","tag":"v0.12.3","useDigest":true}` | Hubble-ui backend image. |
| hubble.ui.backend.livenessProbe.enabled | bool | `false` | Enable liveness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.readinessProbe.enabled | bool | `false` | Enable readiness probe for Hubble-ui backend (requires Hubble-ui 0.12+) |
| hubble.ui.backend.resources | object | `{}` | Resource requests and limits for the 'backend' container of the 'hubble-ui' deployment. |
@ -523,7 +527,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.frontend.extraEnv | list | `[]` | Additional hubble-ui frontend environment variables. |
| hubble.ui.frontend.extraVolumeMounts | list | `[]` | Additional hubble-ui frontend volumeMounts. |
| hubble.ui.frontend.extraVolumes | list | `[]` | Additional hubble-ui frontend volumes. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.12.1","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.image | object | `{"digest":"sha256:e6b825302fc1e406b1305363fe0bcd1fdf95730b32c2b99a2b36dfa37bdaeec2","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/hubble-ui","tag":"v0.12.3","useDigest":true}` | Hubble-ui frontend image. |
| hubble.ui.frontend.resources | object | `{}` | Resource requests and limits for the 'frontend' container of the 'hubble-ui' deployment. |
| hubble.ui.frontend.securityContext | object | `{}` | Hubble-ui frontend security context. |
| hubble.ui.frontend.server.ipv6 | object | `{"enabled":true}` | Controls server listener for ipv6 |
@ -550,7 +554,7 @@ contributors across the globe, there is almost always someone available to help.
| hubble.ui.updateStrategy | object | `{"rollingUpdate":{"maxUnavailable":1},"type":"RollingUpdate"}` | hubble-ui update strategy. |
| identityAllocationMode | string | `"crd"` | Method to use for identity allocation (`crd` or `kvstore`). |
| identityChangeGracePeriod | string | `"5s"` | Time to wait before using new identity on endpoint identity change. |
| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0-pre.3","useDigest":false}` | Agent container image. |
| image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0","useDigest":false}` | Agent container image. |
| imagePullSecrets | string | `nil` | Configure image pull secrets for pulling container images |
| ingressController.default | bool | `false` | Set cilium ingress controller to be the default ingress controller This will let cilium ingress controller route entries without ingress class set |
| ingressController.defaultSecretName | string | `nil` | Default secret name for ingresses without .spec.tls[].secretName set. |
@ -574,6 +578,7 @@ contributors across the globe, there is almost always someone available to help.
| ingressController.service.name | string | `"cilium-ingress"` | Service name |
| ingressController.service.secureNodePort | string | `nil` | Configure a specific nodePort for secure HTTPS traffic on the shared LB service |
| ingressController.service.type | string | `"LoadBalancer"` | Service type for the shared LB service |
| initResources | object | `{}` | resources & limits for the agent init containers |
| installNoConntrackIptablesRules | bool | `false` | Install Iptables rules to skip netfilter connection tracking on all pod traffic. This option is only effective when Cilium is running in direct routing and full KPR mode. Moreover, this option cannot be enabled when Cilium is running in a managed Kubernetes environment or in a chained CNI setup. |
| ipMasqAgent | object | `{"enabled":false}` | Configure the eBPF-based ip-masq-agent |
| ipam.ciliumNodeUpdateRate | string | `"15s"` | Maximum rate at which the CiliumNode custom resource is updated. |
@ -662,7 +667,7 @@ contributors across the globe, there is almost always someone available to help.
| operator.extraVolumes | list | `[]` | Additional cilium-operator volumes. |
| operator.identityGCInterval | string | `"15m0s"` | Interval for identity garbage collection. |
| operator.identityHeartbeatTimeout | string | `"30m0s"` | Timeout for identity heartbeats. |
| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.15.0-pre.3","useDigest":false}` | cilium-operator image. |
| operator.image | object | `{"alibabacloudDigest":"","awsDigest":"","azureDigest":"","genericDigest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/operator","suffix":"","tag":"v1.15.0","useDigest":false}` | cilium-operator image. |
| operator.nodeGCInterval | string | `"5m0s"` | Interval for cilium node garbage collection. |
| operator.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for cilium-operator pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| operator.podAnnotations | object | `{}` | Annotations to be added to cilium-operator pods |
@ -712,7 +717,7 @@ contributors across the globe, there is almost always someone available to help.
| preflight.extraEnv | list | `[]` | Additional preflight environment variables. |
| preflight.extraVolumeMounts | list | `[]` | Additional preflight volumeMounts. |
| preflight.extraVolumes | list | `[]` | Additional preflight volumes. |
| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0-pre.3","useDigest":false}` | Cilium pre-flight image. |
| preflight.image | object | `{"digest":"","override":null,"pullPolicy":"IfNotPresent","repository":"quay.io/cilium/cilium","tag":"v1.15.0","useDigest":false}` | Cilium pre-flight image. |
| preflight.nodeSelector | object | `{"kubernetes.io/os":"linux"}` | Node labels for preflight pod assignment ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector |
| preflight.podAnnotations | object | `{}` | Annotations to be added to preflight pods |
| preflight.podDisruptionBudget.enabled | bool | `false` | enable PodDisruptionBudget ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ |
@ -747,7 +752,7 @@ contributors across the globe, there is almost always someone available to help.
| rbac.create | bool | `true` | Enable creation of Resource-Based Access Control configuration. |
| readinessProbe.failureThreshold | int | `3` | failure threshold of readiness probe |
| readinessProbe.periodSeconds | int | `30` | interval between checks of the readiness probe |
| remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity |
| remoteNodeIdentity | bool | `true` | Enable use of the remote node identity. ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity Deprecated without replacement in 1.15. To be removed in 1.16. |
| resourceQuotas | object | `{"cilium":{"hard":{"pods":"10k"}},"enabled":false,"operator":{"hard":{"pods":"15"}}}` | Enable resource quotas for priority classes used in the cluster. |
| resources | object | `{}` | Agent resource limits & requests ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ |
| rollOutCiliumPods | bool | `false` | Roll out cilium agent pods automatically when configmap is updated. |
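
The table above is the chart's generated values reference and is normally rebuilt from the doc comments in values.yaml rather than edited by hand. A hedged sketch of regenerating it, assuming this README follows the helm-docs convention that its badge and table format suggest:

# Regenerate README.md from values.yaml comments (assumes a README.md.gotmpl next to the chart).
helm-docs --chart-search-root ./cilium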

View File

@ -11,9 +11,9 @@ set -o nounset
# dependencies on anything that is part of the startup script
# itself, and can be safely run multiple times per node (e.g. in
# case of a restart).
if [[ "$(iptables-save | grep -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
then
echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
iptables-save | grep -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
fi
echo 'Done!'
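
The added -E flag is the whole point of this hunk: without it, grep treats the pattern as a basic regular expression in which an unescaped | is a literal character, so the count was always 0 and the AWS rule cleanup never ran. A minimal reproduction that touches nothing on the node:

# '|' is literal in a basic regex, so the old pattern never matched.
printf 'AWS-SNAT-CHAIN-0\nAWS-CONNMARK-CHAIN-0\n' | grep -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN'      # prints 0
printf 'AWS-SNAT-CHAIN-0\nAWS-CONNMARK-CHAIN-0\n' | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN'   # prints 2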

View File

@ -5823,7 +5823,7 @@
"refId": "C"
},
{
"expr": "sum(cilium_policy_change_total{k8s_app=\"cilium\", pod=~\"$pod\"}, outcome=\"fail\") by (pod)",
"expr": "sum(cilium_policy_change_total{k8s_app=\"cilium\", pod=~\"$pod\", outcome=\"fail\"}) by (pod)",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "policy change errors",

View File

@ -100,7 +100,7 @@ then
# Since that version containerd no longer allows missing configuration for the CNI,
# not even for pods with hostNetwork set to true. Thus, we add a temporary one.
# This will be replaced with the real config by the agent pod.
echo -e "{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}" > /etc/cni/net.d/05-cilium.conf
echo -e '{\n\t"cniVersion": "0.3.1",\n\t"name": "cilium",\n\t"type": "cilium-cni"\n}' > /etc/cni/net.d/05-cilium.conf
fi
# Start containerd. It won't create its CNI configuration file anymore.
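
The quoting change is the actual fix: inside a double-quoted string the embedded " characters close and reopen the shell string, so the JSON quotes were stripped from the written file and containerd received invalid CNI configuration. Single quotes preserve them, as a two-line demonstration shows (nothing is written to disk):

echo -e "{\n\t"cniVersion": "0.3.1"\n}"    # quotes eaten by the shell: { cniVersion: 0.3.1 }
echo -e '{\n\t"cniVersion": "0.3.1"\n}'    # valid JSON survives:     { "cniVersion": "0.3.1" }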

View File

@ -405,6 +405,9 @@ spec:
volumeMounts:
- name: cilium-run
mountPath: /var/run/cilium
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.monitor.resources }}
resources:
{{- toYaml . | trim | nindent 10 }}
@ -454,6 +457,9 @@ spec:
volumeMounts:
- name: tmp
mountPath: /tmp
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
{{- if .Values.cgroup.autoMount.enabled }}
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
@ -507,6 +513,10 @@ spec:
- name: apply-sysctl-overwrites
image: {{ include "cilium.image" .Values.image | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.initResources }}
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
env:
- name: BIN_PATH
value: {{ .Values.cni.binPath }}
@ -552,6 +562,10 @@ spec:
- name: mount-bpf-fs
image: {{ include "cilium.image" .Values.image | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.initResources }}
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
command:
@ -573,6 +587,10 @@ spec:
- name: wait-for-node-init
image: {{ include "cilium.image" .Values.image | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.initResources }}
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
command:
- sh
- -c
@ -650,7 +668,10 @@ spec:
mountPropagation: HostToContainer
- name: cilium-run
mountPath: /var/run/cilium
{{- with .Values.nodeinit.resources }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.initResources }}
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
@ -658,6 +679,10 @@ spec:
- name: wait-for-kube-proxy
image: {{ include "cilium.image" .Values.image | quote }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- with .Values.initResources }}
resources:
{{- toYaml . | trim | nindent 10 }}
{{- end }}
securityContext:
privileged: true
command:
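
All of the agent's init containers above now share one optional initResources block, and the containers pick up the chart-level extraVolumeMounts hook. A hedged rendering check (chart directory and template path are assumptions about the local checkout):

cat > /tmp/agent-overrides.yaml <<'EOF'
initResources:
  requests:
    cpu: 10m
    memory: 32Mi
extraVolumes:
  - name: host-etc
    hostPath:
      path: /etc
extraVolumeMounts:
  - name: host-etc
    mountPath: /host/etc
    readOnly: true
EOF
helm template cilium ./cilium -n kube-system -f /tmp/agent-overrides.yaml \
  --show-only templates/cilium-agent/daemonset.yaml | grep -B2 -A3 'cpu: 10m'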

View File

@ -39,6 +39,20 @@ spec:
metricRelabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if .Values.envoy.prometheus.serviceMonitor.enabled }}
- port: envoy-metrics
interval: {{ .Values.envoy.prometheus.serviceMonitor.interval | quote }}
honorLabels: true
path: /metrics
{{- with .Values.envoy.prometheus.serviceMonitor.relabelings }}
relabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.envoy.prometheus.serviceMonitor.metricRelabelings }}
metricRelabelings:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
targetLabels:
- k8s-app
{{- if .Values.prometheus.serviceMonitor.jobLabel }}
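
With envoy.prometheus.serviceMonitor.enabled set, the agent ServiceMonitor now also scrapes the envoy-metrics port of the embedded Envoy, which is what the new values notes about "cilium-agent with Envoy enabled" refer to. A hedged render check (template path assumed; helm template does not need the ServiceMonitor CRD to be installed):

helm template cilium ./cilium -n kube-system \
  --set prometheus.enabled=true \
  --set prometheus.serviceMonitor.enabled=true \
  --set envoy.prometheus.serviceMonitor.enabled=true \
  --show-only templates/cilium-agent/servicemonitor.yaml | grep -A2 'envoy-metrics'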

View File

@ -14,6 +14,7 @@
{{- $azureUsePrimaryAddress := "true" -}}
{{- $defaultK8sClientQPS := 5 -}}
{{- $defaultK8sClientBurst := 10 -}}
{{- $defaultDNSProxyEnableTransparentMode := "false" -}}
{{- /* Default values when 1.8 was initially deployed */ -}}
{{- if semverCompare ">=1.8" (default "1.8" .Values.upgradeCompatibility) -}}
@ -48,6 +49,7 @@
{{- $azureUsePrimaryAddress = "false" -}}
{{- end }}
{{- $defaultKubeProxyReplacement = "disabled" -}}
{{- $defaultDNSProxyEnableTransparentMode = "true" -}}
{{- end -}}
{{- /* Default values when 1.14 was initially deployed */ -}}
@ -448,9 +450,15 @@ data:
# - vxlan (default)
# - geneve
{{- if .Values.gke.enabled }}
{{- if ne (.Values.routingMode | default "native") "native" }}
{{- fail (printf "RoutingMode must be set to native when gke.enabled=true" )}}
{{- end }}
routing-mode: "native"
enable-endpoint-routes: "true"
{{- else if .Values.aksbyocni.enabled }}
{{- if ne (.Values.routingMode | default "tunnel") "tunnel" }}
{{- fail (printf "RoutingMode must be set to tunnel when aksbyocni.enabled=true" )}}
{{- end }}
routing-mode: "tunnel"
tunnel-protocol: "vxlan"
{{- else if .Values.routingMode }}
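
The two new guards turn a silent misconfiguration into a template-time error: gke.enabled forces native routing and aksbyocni.enabled forces tunnel routing. For example, the following is expected to fail (chart path assumed):

helm template cilium ./cilium -n kube-system \
  --set gke.enabled=true --set routingMode=tunnel 2>&1 | tail -n 1
# -> Error: ... RoutingMode must be set to native when gke.enabled=true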
@ -1153,6 +1161,13 @@ data:
{{- end }}
{{- if .Values.dnsProxy }}
{{- if hasKey .Values.dnsProxy "enableTransparentMode" }}
# explicit setting gets precedence
dnsproxy-enable-transparent-mode: {{ .Values.dnsProxy.enableTransparentMode | quote }}
{{- else if eq $cniChainingMode "none" }}
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: {{ $defaultDNSProxyEnableTransparentMode | quote }}
{{- end }}
{{- if .Values.dnsProxy.dnsRejectResponseCode }}
tofqdns-dns-reject-response-code: {{ .Values.dnsProxy.dnsRejectResponseCode | quote }}
{{- end }}
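
The DNS proxy mode now ends up in the agent ConfigMap: an explicit dnsProxy.enableTransparentMode always wins, and the version-dependent default only applies when CNI chaining is off. A hedged check of the rendered key (template path assumed):

helm template cilium ./cilium -n kube-system \
  --set dnsProxy.enableTransparentMode=false \
  --show-only templates/cilium-configmap.yaml | grep dnsproxy-enable-transparent-mode
# -> dnsproxy-enable-transparent-mode: "false"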

View File

@ -7,15 +7,16 @@ metadata:
namespace: {{ .Values.envoy.prometheus.serviceMonitor.namespace | default .Release.Namespace }}
labels:
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
{{- with .Values.envoy.prometheus.serviceMonitor.labels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if or .Values.envoy.prometheus.serviceMonitor .Values.envoy.annotations }}
{{- if or .Values.envoy.prometheus.serviceMonitor.annotations .Values.envoy.annotations }}
annotations:
{{- with .Values.envoy.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.envoy.prometheus.serviceMonitor }}
{{- with .Values.envoy.prometheus.serviceMonitor.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View File

@ -70,8 +70,13 @@ spec:
- /tmp/ready
initialDelaySeconds: 5
periodSeconds: 5
{{- with .Values.preflight.extraEnv }}
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
{{- with .Values.preflight.extraEnv }}
{{- toYaml . | trim | nindent 12 }}
{{- end }}
volumeMounts:

View File

@ -60,6 +60,10 @@ spec:
- /tmp/ready-validate-cnp
initialDelaySeconds: 5
periodSeconds: 5
{{- with .Values.preflight.extraVolumeMounts }}
volumeMounts:
{{- toYaml . | nindent 10 }}
{{- end }}
env:
{{- if .Values.k8sServiceHost }}
- name: KUBERNETES_SERVICE_HOST
@ -77,11 +81,16 @@ spec:
{{- toYaml . | trim | nindent 12 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
{{- with .Values.preflight.extraVolumes }}
volumes:
{{- toYaml . | trim | nindent 6 }}
{{- end }}
hostNetwork: true
restartPolicy: Always
priorityClassName: {{ include "cilium.priorityClass" (list $ .Values.preflight.priorityClassName "system-cluster-critical") }}
serviceAccount: {{ .Values.serviceAccounts.preflight.name | quote }}
serviceAccountName: {{ .Values.serviceAccounts.preflight.name | quote }}
automountServiceAccountToken: {{ .Values.serviceAccounts.preflight.automount }}
terminationGracePeriodSeconds: {{ .Values.preflight.terminationGracePeriodSeconds }}
{{- with .Values.preflight.affinity }}
affinity:

View File

@ -82,6 +82,9 @@ spec:
volumeMounts:
- name: etcd-data-dir
mountPath: /var/run/etcd
{{- with .Values.clustermesh.apiserver.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
{{- with .Values.clustermesh.apiserver.etcd.init.resources }}
resources:
@ -133,6 +136,9 @@ spec:
readOnly: true
- name: etcd-data-dir
mountPath: /var/run/etcd
{{- with .Values.clustermesh.apiserver.extraVolumeMounts }}
{{- toYaml . | nindent 8 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
{{- with .Values.clustermesh.apiserver.etcd.resources }}
resources:

View File

@ -108,6 +108,9 @@ spec:
mountPath: /var/lib/hubble-relay/tls
readOnly: true
{{- end }}
{{- with .Values.hubble.relay.extraVolumeMounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
terminationMessagePolicy: FallbackToLogsOnError
restartPolicy: Always
priorityClassName: {{ .Values.hubble.relay.priorityClassName }}
@ -178,6 +181,9 @@ spec:
path: server.key
{{- end }}
{{- end }}
{{- with .Values.hubble.relay.extraVolumes }}
{{- toYaml . | nindent 6 }}
{{- end }}
{{- end }}
{{- define "hubble-relay.probe" }}

View File

@ -99,10 +99,12 @@ spec:
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
tolerations:
{{- toYaml . | trim | nindent 8 }}
{{- end }}
{{- with .Values.authentication.mutual.spire.install.agent.tolerations }}
{{- toYaml . | trim | nindent 8 }}
{{- end }}
- key: {{ .Values.agentNotReadyTaintKey | default "node.cilium.io/agent-not-ready" }}
effect: NoSchedule
volumes:
- name: spire-config
configMap:

View File

@ -1,4 +1,4 @@
{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled -}}
{{- if and .Values.authentication.mutual.spire.enabled .Values.authentication.mutual.spire.install.enabled (not .Values.authentication.mutual.spire.install.existingNamespace) -}}
apiVersion: v1
kind: Namespace
metadata:
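
With the new existingNamespace flag, the Namespace object above is only rendered when Helm is supposed to create and own it. A hedged check that nothing is emitted when the namespace is managed externally:

helm template cilium ./cilium -n kube-system \
  --set authentication.mutual.spire.enabled=true \
  --set authentication.mutual.spire.install.existingNamespace=true \
  | grep -c 'kind: Namespace' || true
# -> 0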

View File

@ -146,7 +146,7 @@ rollOutCiliumPods: false
image:
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.15.0-pre.3"
tag: "v1.15.0"
pullPolicy: "IfNotPresent"
# cilium-digest
digest: ""
@ -236,6 +236,9 @@ resources: {}
# cpu: 100m
# memory: 512Mi
# -- resources & limits for the agent init containers
initResources: {}
securityContext:
# -- User to run the pod with
# runAsUser: 0
@ -1225,7 +1228,7 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-relay"
tag: "v1.15.0-pre.3"
tag: "v1.15.0"
# hubble-relay-digest
digest: ""
useDigest: false
@ -1296,6 +1299,12 @@ hubble:
rollingUpdate:
maxUnavailable: 1
# -- Additional hubble-relay volumes.
extraVolumes: []
# -- Additional hubble-relay volumeMounts.
extraVolumeMounts: []
# -- hubble-relay pod security context
podSecurityContext:
fsGroup: 65532
@ -1456,8 +1465,8 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-ui-backend"
tag: "v0.12.1"
digest: "sha256:1f86f3400827a0451e6332262467f894eeb7caf0eb8779bd951e2caa9d027cbe"
tag: "v0.12.3"
digest: "sha256:1cd84251cec46e20f9e839ee0afba9b51c8de59d35681234f701d7f42062f138"
useDigest: true
pullPolicy: "IfNotPresent"
@ -1495,8 +1504,8 @@ hubble:
image:
override: ~
repository: "quay.io/cilium/hubble-ui"
tag: "v0.12.1"
digest: "sha256:9e5f81ee747866480ea1ac4630eb6975ff9227f9782b7c93919c081c33f38267"
tag: "v0.12.3"
digest: "sha256:e6b825302fc1e406b1305363fe0bcd1fdf95730b32c2b99a2b36dfa37bdaeec2"
useDigest: true
pullPolicy: "IfNotPresent"
@ -2059,9 +2068,9 @@ envoy:
image:
override: ~
repository: "quay.io/cilium/cilium-envoy"
tag: "v1.27.2-f19708f3d0188fe39b7e024b4525b75a9eeee61f"
tag: "v1.27.2-13f6142b9c02268b10d547c8b093ef16724538e3"
pullPolicy: "IfNotPresent"
digest: "sha256:80de27c1d16ab92923cc0cd1fff90f2e7047a9abf3906fda712268d9cbc5b950"
digest: "sha256:bf37c46d3d6bd5f51ff11d09de81671ced070e27912e080083c58a6d3fbb740f"
useDigest: true
# -- Additional containers added to the cilium Envoy DaemonSet.
@ -2198,12 +2207,16 @@ envoy:
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ~
# -- Configure Cilium Envoy Prometheus options.
# Note that some of these apply to either cilium-agent or cilium-envoy.
prometheus:
# -- Enable prometheus metrics for cilium-envoy
enabled: true
serviceMonitor:
# -- Enable service monitors.
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
# with Envoy enabled.
enabled: false
# -- Labels to add to ServiceMonitor cilium-envoy
labels: {}
@ -2215,18 +2228,21 @@ envoy:
# service monitors configured.
# namespace: ""
# -- Relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
metricRelabelings: ~
# -- Serve prometheus metrics for cilium-envoy on the configured port
port: "9964"
# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
# Deprecated without replacement in 1.15. To be removed in 1.16.
remoteNodeIdentity: true
# -- Enable resource quotas for priority classes used in the cluster.
@ -2460,7 +2476,7 @@ operator:
image:
override: ~
repository: "quay.io/cilium/operator"
tag: "v1.15.0-pre.3"
tag: "v1.15.0"
# operator-generic-digest
genericDigest: ""
# operator-azure-digest
@ -2755,7 +2771,7 @@ preflight:
image:
override: ~
repository: "quay.io/cilium/cilium"
tag: "v1.15.0-pre.3"
tag: "v1.15.0"
# cilium-digest
digest: ""
useDigest: false
@ -2917,7 +2933,7 @@ clustermesh:
image:
override: ~
repository: "quay.io/cilium/clustermesh-apiserver"
tag: "v1.15.0-pre.3"
tag: "v1.15.0"
# clustermesh-apiserver-digest
digest: ""
useDigest: false
@ -3310,6 +3326,8 @@ dnsProxy:
proxyPort: 0
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
# -- SCTP Configuration Values
sctp:
@ -3349,6 +3367,8 @@ authentication:
enabled: true
# -- SPIRE namespace to install into
namespace: cilium-spire
# -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
existingNamespace: false
# -- init container image of SPIRE agent and server
initImage:
override: ~
@ -3363,8 +3383,8 @@ authentication:
image:
override: ~
repository: "ghcr.io/spiffe/spire-agent"
tag: "1.8.4"
digest: "sha256:d489bc8470d7a0f292e0e3576c3e7025253343dc798241bcfd9061828e2a6bef"
tag: "1.8.5"
digest: "sha256:99405637647968245ff9fe215f8bd2bd0ea9807be9725f8bf19fe1b21471e52b"
useDigest: true
pullPolicy: "IfNotPresent"
# -- SPIRE agent service account
@ -3378,8 +3398,21 @@ authentication:
# -- SPIRE Workload Attestor kubelet verification.
skipKubeletVerification: true
# -- SPIRE agent tolerations configuration
# By default it follows the same tolerations as the agent itself
# to allow the Cilium agent on this node to connect to SPIRE.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
effect: NoSchedule
value: "true"
- key: CriticalAddonsOnly
operator: "Exists"
# -- SPIRE agent affinity configuration
affinity: {}
# -- SPIRE agent nodeSelector configuration
@ -3398,8 +3431,8 @@ authentication:
image:
override: ~
repository: "ghcr.io/spiffe/spire-server"
tag: "1.8.4"
digest: "sha256:bf79e0a921f8b8aa92602f7ea335616e72f7e91f939848e7ccc52d5bddfe96a1"
tag: "1.8.5"
digest: "sha256:28269265882048dcf0fed32fe47663cd98613727210b8d1a55618826f9bf5428"
useDigest: true
pullPolicy: "IfNotPresent"
# -- SPIRE server service account
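
A quick consistency check after a bump like this, confirming Chart.yaml and the default image tags agree (paths assume the chart checkout; yq v4 syntax):

yq '.version, .appVersion' ./cilium/Chart.yaml                                        # both 1.15.0-edg.1
yq '.image.tag, .operator.image.tag, .hubble.relay.image.tag' ./cilium/values.yaml    # upstream defaults, all v1.15.0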

View File

@ -233,6 +233,9 @@ resources: {}
# cpu: 100m
# memory: 512Mi
# -- resources & limits for the agent init containers
initResources: {}
securityContext:
# -- User to run the pod with
# runAsUser: 0
@ -1293,6 +1296,12 @@ hubble:
rollingUpdate:
maxUnavailable: 1
# -- Additional hubble-relay volumes.
extraVolumes: []
# -- Additional hubble-relay volumeMounts.
extraVolumeMounts: []
# -- hubble-relay pod security context
podSecurityContext:
fsGroup: 65532
@ -2195,12 +2204,16 @@ envoy:
# Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: ~
# -- Configure Cilium Envoy Prometheus options.
# Note that some of these apply to either cilium-agent or cilium-envoy.
prometheus:
# -- Enable prometheus metrics for cilium-envoy
enabled: true
serviceMonitor:
# -- Enable service monitors.
# This requires the prometheus CRDs to be available (see https://github.com/prometheus-operator/prometheus-operator/blob/main/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml)
# Note that this setting applies to both cilium-envoy _and_ cilium-agent
# with Envoy enabled.
enabled: false
# -- Labels to add to ServiceMonitor cilium-envoy
labels: {}
@ -2212,18 +2225,21 @@ envoy:
# service monitors configured.
# namespace: ""
# -- Relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
relabelings:
- sourceLabels:
- __meta_kubernetes_pod_node_name
targetLabel: node
replacement: ${1}
# -- Metrics relabeling configs for the ServiceMonitor cilium-envoy
# or for cilium-agent with Envoy configured.
metricRelabelings: ~
# -- Serve prometheus metrics for cilium-envoy on the configured port
port: "9964"
# -- Enable use of the remote node identity.
# ref: https://docs.cilium.io/en/v1.7/install/upgrade/#configmap-remote-node-identity
# Deprecated without replacement in 1.15. To be removed in 1.16.
remoteNodeIdentity: true
# -- Enable resource quotas for priority classes used in the cluster.
@ -3307,6 +3323,8 @@ dnsProxy:
proxyPort: 0
# -- The maximum time the DNS proxy holds an allowed DNS response before sending it along. Responses are sent as soon as the datapath is updated with the new IP information.
proxyResponseMaxDelay: 100ms
# -- DNS proxy operation mode (true/false, or unset to use version dependent defaults)
# enableTransparentMode: true
# -- SCTP Configuration Values
sctp:
@ -3346,6 +3364,8 @@ authentication:
enabled: true
# -- SPIRE namespace to install into
namespace: cilium-spire
# -- SPIRE namespace already exists. Set to true if Helm should not create, manage, and import the SPIRE namespace.
existingNamespace: false
# -- init container image of SPIRE agent and server
initImage:
override: ~
@ -3375,8 +3395,21 @@ authentication:
# -- SPIRE Workload Attestor kubelet verification.
skipKubeletVerification: true
# -- SPIRE agent tolerations configuration
# By default it follows the same tolerations as the agent itself
# to allow the Cilium agent on this node to connect to SPIRE.
# ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
tolerations: []
tolerations:
- key: node.kubernetes.io/not-ready
effect: NoSchedule
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: node-role.kubernetes.io/control-plane
effect: NoSchedule
- key: node.cloudprovider.kubernetes.io/uninitialized
effect: NoSchedule
value: "true"
- key: CriticalAddonsOnly
operator: "Exists"
# -- SPIRE agent affinity configuration
affinity: {}
# -- SPIRE agent nodeSelector configuration

View File

@ -6,11 +6,11 @@ index 256a79542..3f3fc714b 100644
name: cilium
displayName: Cilium
home: https://cilium.io/
-version: 1.15.0-pre.3
-appVersion: 1.15.0-pre.3
+version: 1.15.0-pre.3-edg.3
+appVersion: 1.15.0-pre.3-edg.3
-version: 1.15.0
-appVersion: 1.15.0
+version: 1.15.0-edg.1
+appVersion: 1.15.0-edg.1
kubeVersion: ">= 1.16.0-0"
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@main/Documentation/images/logo-solo.svg
icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.15/Documentation/images/logo-solo.svg
description: eBPF-based Networking, Security, and Observability

View File

@ -21,7 +21,7 @@ git clone \
--no-checkout \
--sparse \
--depth 1 \
-b v1.15.0-pre.3-edg.3 \
-b burgerdev/rebase-v1.15.0 \
https://github.com/edgelesssys/cilium.git
cd cilium
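
For context, the clone above deliberately materializes nothing: --no-checkout skips the worktree, --sparse starts with a minimal sparse-checkout, and -b pins the branch being vendored. A hedged sketch of the steps that typically follow (the chart path inside the repository is an assumption):

git sparse-checkout set install/kubernetes/cilium
git checkout    # materializes only the sparse paths from the pinned branch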

View File

@ -198,7 +198,7 @@ func TestHelmApply(t *testing.T) {
if tc.clusterCertManagerVersion != nil {
certManagerVersion = *tc.clusterCertManagerVersion
}
helmListVersion(lister, "cilium", "v1.15.0-pre.3-edg.3")
helmListVersion(lister, "cilium", "v1.15.0-edg.1")
helmListVersion(lister, "cert-manager", certManagerVersion)
helmListVersion(lister, "constellation-services", tc.clusterMicroServiceVersion)
helmListVersion(lister, "constellation-operators", tc.clusterMicroServiceVersion)

View File

@ -367,16 +367,18 @@ func (i *chartLoader) loadCiliumValues(cloudprovider.Provider) (map[string]any,
"image": map[string]any{
"repository": "ghcr.io/edgelesssys/cilium/cilium",
"suffix": "",
"tag": "v1.15.0-pre.3-edg.2",
"digest": "sha256:c21b7fbbb084a128a479d6170e5f89ad2768dfecb4af10ee6a99ffe5d1a11749",
"tag": "v1.15.0-edg.1-experimental",
"digest": "sha256:7c34cad466b5b839f963a17b6dac4e3154a565a1c553cbf03e93cd278ca068c2",
"useDigest": true,
},
"operator": map[string]any{
"image": map[string]any{
"repository": "ghcr.io/edgelesssys/cilium/operator",
"suffix": "",
"tag": "v1.15.0-pre.3-edg.2",
"genericDigest": "sha256:4ea9de5cfeb4554b82b509f0de41120a90e35a15e81a04f76c4cb405ddea3e7c",
"tag": "v1.15.0-edg.1-experimental",
// Careful: this is the digest of ghcr.io/.../operator-generic!
// See magic image manipulation in ./helm/charts/cilium/templates/cilium-operator/_helpers.tpl.
"genericDigest": "sha256:d85bda80753131905514bbc419c3acb00782f0cdc6e5b0e7b6ade10447442ff1",
"useDigest": true,
},
"podDisruptionBudget": map[string]any{