1 # yamllint disable rule:hyphens rule:commas rule:indentation rule:line-length rule:comments rule:comments-indentation
3 # Source: cilium/charts/config/templates/configmap.yaml
11 # Identity allocation mode selects how identities are shared between cilium
12 # nodes by setting how they are stored. The options are "crd" or "kvstore".
13 # - "crd" stores identities in kubernetes as CRDs (custom resource definition).
14 # These can be queried with:
15 # kubectl get ciliumid
16 # - "kvstore" stores identities in a kvstore, etcd or consul, that is
17 # configured below. Cilium versions before 1.6 supported only the kvstore
18 # backend. Upgrades from these older cilium versions should continue using
19 # the kvstore by commenting out the identity-allocation-mode below, or
20 # setting it to "kvstore".
21 identity-allocation-mode: crd
23 # If you want to run cilium in debug mode change this value to true
26 # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
30 # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
34 # If you want cilium monitor to aggregate tracing for packets, set this level
35 # to "low", "medium", or "maximum". The higher the level, the fewer packets
36 # that will be seen in monitor output.
37 monitor-aggregation: medium
39 # ct-global-max-entries-* specifies the maximum number of connections
40 # supported across all endpoints, split by protocol: tcp or other. One pair
41 # of maps uses these values for IPv4 connections, and another pair of maps
42 # use these values for IPv6 connections.
44 # If these values are modified, then during the next Cilium startup the
45 # tracking of ongoing connections may be disrupted. This may lead to brief
46 # policy drops or a change in loadbalancing decisions for a connection.
48 # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
49 # during the upgrade process, comment out these options.
50 bpf-ct-global-tcp-max: "524288"
51 bpf-ct-global-any-max: "262144"
53 # Pre-allocation of map entries allows per-packet latency to be reduced, at
54 # the expense of up-front memory allocation for the entries in the maps. The
55 # default value below will minimize memory usage in the default installation;
56 # users who are sensitive to latency may consider setting this to "true".
58 # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
59 # this option and behave as though it is set to "true".
61 # If this value is modified, then during the next Cilium startup the restore
62 # of existing endpoints and tracking of ongoing connections may be disrupted.
63 # This may lead to policy drops or a change in loadbalancing decisions for a
64 # connection for some time. Endpoints may need to be recreated to restore
67 # If this option is set to "false" during an upgrade from 1.3 or earlier to
68 # 1.4 or later, then it may cause one-time disruptions during the upgrade.
69 preallocate-bpf-maps: "false"
71 # Regular expression matching compatible Istio sidecar istio-proxy
72 # container image names
73 sidecar-istio-proxy-image: "cilium/istio_proxy"
75 # Encapsulation mode for communication between nodes
82 # Name of the cluster. Only relevant when building a mesh of clusters.
85 # DNS Polling periodically issues a DNS lookup for each `matchName` from
86 # cilium-agent. The result is used to regenerate endpoint policy.
87 # DNS lookups are repeated with an interval of 5 seconds, and are made for
88 # A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
89 # data is used instead. An IP change will trigger a regeneration of the Cilium
90 # policy for each endpoint and increment the per cilium-agent policy
91 # repository revision.
93 # This option is disabled by default starting from version 1.4.x in favor
94 # of a more powerful DNS proxy-based implementation, see [0] for details.
95 # Enable this option if you want to use FQDN policies but do not want to use
98 # To ease upgrade, users may opt to set this option to "true".
99 # Otherwise please refer to the Upgrade Guide [1] which explains how to
100 # prepare policy rules for upgrade.
102 # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
103 # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
104 tofqdns-enable-poller: "false"
106 # wait-bpf-mount makes init container wait until bpf filesystem is mounted
107 wait-bpf-mount: "false"
109 # Enable fetching of container-runtime specific metadata
111 # By default, the Kubernetes pod and namespace labels are retrieved and
112 # associated with endpoints for identification purposes. By integrating
113 # with the container runtime, container runtime specific labels can be
114 # retrieved, such labels will be prefixed with container:
116 # CAUTION: The container runtime labels can include information such as pod
117 # annotations which may result in each pod being associated with a unique set of
118 # labels which can result in excessive security identities being allocated.
119 # Please review the labels filter when enabling container runtime labels.
126 # - auto (automatically detect the container runtime)
128 container-runtime: none
132 install-iptables-rules: "true"
133 auto-direct-node-routes: "false"
134 enable-node-port: "false"
137 # Source: cilium/charts/agent/templates/serviceaccount.yaml
142 namespace: kube-system
145 # Source: cilium/charts/operator/templates/serviceaccount.yaml
149 name: cilium-operator
150 namespace: kube-system
153 # Source: cilium/charts/agent/templates/clusterrole.yaml
154 apiVersion: rbac.authorization.k8s.io/v1
205 - apiextensions.k8s.io
207 - customresourcedefinitions
217 - ciliumnetworkpolicies
218 - ciliumnetworkpolicies/status
220 - ciliumendpoints/status
224 - ciliumidentities/status
229 # Source: cilium/charts/operator/templates/clusterrole.yaml
230 apiVersion: rbac.authorization.k8s.io/v1
233 name: cilium-operator
238 # to automatically delete [core|kube]dns pods so that they start being
249 # to automatically read from k8s and import the node's pod CIDR to cilium's
250 # etcd so all nodes know how to reach another pod running in a different
253 # to perform the translation of a CNP that contains `ToGroup` to its endpoints
256 # to check apiserver connectivity
265 - ciliumnetworkpolicies
266 - ciliumnetworkpolicies/status
268 - ciliumendpoints/status
272 - ciliumidentities/status
277 # Source: cilium/charts/agent/templates/clusterrolebinding.yaml
278 apiVersion: rbac.authorization.k8s.io/v1
279 kind: ClusterRoleBinding
283 apiGroup: rbac.authorization.k8s.io
287 - kind: ServiceAccount
289 namespace: kube-system
292 # Source: cilium/charts/operator/templates/clusterrolebinding.yaml
293 apiVersion: rbac.authorization.k8s.io/v1
294 kind: ClusterRoleBinding
296 name: cilium-operator
298 apiGroup: rbac.authorization.k8s.io
300 name: cilium-operator
302 - kind: ServiceAccount
303 name: cilium-operator
304 namespace: kube-system
307 # Source: cilium/charts/agent/templates/daemonset.yaml
313 kubernetes.io/cluster-service: "true"
315 namespace: kube-system
320 kubernetes.io/cluster-service: "true"
324 # This annotation plus the CriticalAddonsOnly toleration makes
325 # cilium to be a critical pod in the cluster, which ensures cilium
326 # gets priority scheduling.
327 # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
328 scheduler.alpha.kubernetes.io/critical-pod: ""
329 scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
332 kubernetes.io/cluster-service: "true"
336 - --config-dir=/tmp/cilium/config-map
340 - name: K8S_NODE_NAME
344 fieldPath: spec.nodeName
345 - name: CILIUM_K8S_NAMESPACE
349 fieldPath: metadata.namespace
350 - name: CILIUM_FLANNEL_MASTER_DEVICE
353 key: flannel-master-device
356 - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
359 key: flannel-uninstall-on-exit
362 - name: CILIUM_CLUSTERMESH_CONFIG
363 value: /var/lib/cilium/clustermesh/
364 - name: CILIUM_CNI_CHAINING_MODE
367 key: cni-chaining-mode
370 - name: CILIUM_CUSTOM_CNI_CONF
376 image: "iecedge/cilium:latest"
377 imagePullPolicy: IfNotPresent
394 # The initial delay for the liveness probe is intentionally large to
395 # avoid an endless kill & restart cycle if in the event that the initial
396 # bootstrapping takes longer than expected.
397 initialDelaySeconds: 120
409 initialDelaySeconds: 5
420 - mountPath: /sys/fs/bpf
422 - mountPath: /var/run/cilium
424 - mountPath: /host/opt/cni/bin
426 - mountPath: /host/etc/cni/net.d
428 - mountPath: /var/lib/cilium/clustermesh
429 name: clustermesh-secrets
431 - mountPath: /tmp/cilium/config-map
432 name: cilium-config-path
434 # Needed to be able to load kernel modules
435 - mountPath: /lib/modules
438 - mountPath: /run/xtables.lock
445 - name: CILIUM_ALL_STATE
448 key: clean-cilium-state
451 - name: CILIUM_BPF_STATE
454 key: clean-cilium-bpf-state
457 - name: CILIUM_WAIT_BPF_MOUNT
463 image: "iecedge/cilium:latest"
464 imagePullPolicy: IfNotPresent
465 name: clean-cilium-state
472 - mountPath: /sys/fs/bpf
474 - mountPath: /var/run/cilium
476 restartPolicy: Always
477 serviceAccount: cilium
478 serviceAccountName: cilium
479 terminationGracePeriodSeconds: 1
483 # To keep state between restarts / upgrades
485 path: /var/run/cilium
486 type: DirectoryOrCreate
488 # To keep state between restarts / upgrades for bpf maps
491 type: DirectoryOrCreate
493 # To install cilium cni plugin in the host
496 type: DirectoryOrCreate
498 # To install cilium cni configuration in the host
501 type: DirectoryOrCreate
503 # To be able to load kernel modules
507 # To access iptables concurrently with other processes (e.g. kube-proxy)
509 path: /run/xtables.lock
512 # To read the clustermesh configuration
513 - name: clustermesh-secrets
517 secretName: cilium-clustermesh
518 # To read the configuration from the config map
521 name: cilium-config-path
528 # Source: cilium/charts/operator/templates/deployment.yaml
533 io.cilium/app: operator
534 name: cilium-operator
535 name: cilium-operator
536 namespace: kube-system
541 io.cilium/app: operator
542 name: cilium-operator
552 io.cilium/app: operator
553 name: cilium-operator
557 - --debug=$(CILIUM_DEBUG)
558 - --identity-allocation-mode=$(CILIUM_IDENTITY_ALLOCATION_MODE)
562 - name: CILIUM_K8S_NAMESPACE
566 fieldPath: metadata.namespace
567 - name: K8S_NODE_NAME
571 fieldPath: spec.nodeName
578 - name: CILIUM_CLUSTER_NAME
584 - name: CILIUM_CLUSTER_ID
596 - name: CILIUM_DISABLE_ENDPOINT_CRD
599 key: disable-endpoint-crd
602 - name: CILIUM_KVSTORE
608 - name: CILIUM_KVSTORE_OPT
614 - name: AWS_ACCESS_KEY_ID
617 key: AWS_ACCESS_KEY_ID
620 - name: AWS_SECRET_ACCESS_KEY
623 key: AWS_SECRET_ACCESS_KEY
626 - name: AWS_DEFAULT_REGION
629 key: AWS_DEFAULT_REGION
632 - name: CILIUM_IDENTITY_ALLOCATION_MODE
635 key: identity-allocation-mode
638 image: "iecedge/operator:latest"
639 imagePullPolicy: IfNotPresent
640 name: cilium-operator
646 initialDelaySeconds: 60
651 restartPolicy: Always
652 serviceAccount: cilium-operator
653 serviceAccountName: cilium-operator
656 # Source: cilium/charts/agent/templates/servicemonitor.yaml
659 # Source: cilium/charts/agent/templates/svc.yaml
662 # Source: cilium/charts/operator/templates/servicemonitor.yaml
665 # Source: cilium/charts/operator/templates/svc.yaml