---
# Source: contiv-vpp/templates/vpp.yaml
# Contiv-VPP deployment YAML file. This deploys Contiv VPP networking on a Kubernetes cluster.
# The deployment consists of the following components:
#   - contiv-etcd - deployed on the k8s master
#   - contiv-vswitch - deployed on each k8s node
#   - contiv-ksr - deployed on the k8s master
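#
# The whole file can be applied in one step, e.g.:
#   kubectl apply -f contiv-vpp.yaml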

###########################################################
#  Configuration
###########################################################

# This config map contains the contiv-agent configuration. The most important part is ipamConfig,
# which may need to be updated if the default IPAM settings do not match your needs.
# nodeConfig may be used when your nodes have more than one VPP interface. In that case, one
# of them needs to be marked as the main inter-node interface, and the rest can be
# configured with arbitrary IP addresses (the IPs must not conflict with the main IPAM config).
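# A nodeConfig entry could look roughly like the sketch below (illustrative only;
# the exact field names are assumptions -- consult the Contiv-VPP documentation
# for the authoritative nodeConfig schema):
#   nodeConfig:
#   - nodeName: "worker1"
#     mainVppInterface:
#       interfaceName: "GigabitEthernet0/9/0"
#     otherVppInterfaces:
#     - interfaceName: "GigabitEthernet0/a/0"
#       ip: "192.168.1.10/24"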
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-agent-cfg
  namespace: kube-system
data:
  contiv.conf: |-
    useNoOverlay: false
    useTAPInterfaces: true
    tapInterfaceVersion: 2
    tapv2RxRingSize: 256
    tapv2TxRingSize: 256
    tcpChecksumOffloadDisabled: true
    STNVersion: 1
    natExternalTraffic: true
    mtuSize: 1450
    scanIPNeighbors: true
    ipNeighborScanInterval: 1
    ipNeighborStaleThreshold: 4
    enablePacketTrace: false
    routeServiceCIDRToVPP: false
    crdNodeConfigurationDisabled: true
    ipamConfig:
      nodeInterconnectDHCP: false
      nodeInterconnectCIDR: 192.168.16.0/24
      podSubnetCIDR: 10.1.0.0/16
      podSubnetOneNodePrefixLen: 24
      vppHostSubnetCIDR: 172.30.0.0/16
      vppHostSubnetOneNodePrefixLen: 24
      vxlanCIDR: 192.168.30.0/24
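  # Note: the duration values in controller.conf below are interpreted as Go
  # durations in nanoseconds (e.g. delayRetry: 1000000000 corresponds to 1 second).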
  controller.conf: |
    enableRetry: true
    delayRetry: 1000000000
    maxRetryAttempts: 3
    enableExpBackoffRetry: true
    delayLocalResync: 5000000000
    startupResyncDeadline: 30000000000
    enablePeriodicHealing: false
    periodicHealingInterval: 30000000000
    delayAfterErrorHealing: 5000000000
    remoteDBProbingInterval: 3000000000
    recordEventHistory: true
    eventHistoryAgeLimit: 1440
    permanentlyRecordedInitPeriod: 60
  service.conf: |
    cleanupIdleNATSessions: true
    tcpNATSessionTimeout: 180
    otherNATSessionTimeout: 5
    serviceLocalEndpointWeight: 1
    disableNATVirtualReassembly: false

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: vpp-agent-cfg
  namespace: kube-system
data:
  govpp.conf: |
    health-check-probe-interval: 3000000000
    health-check-reply-timeout: 500000000
    health-check-threshold: 3
    reply-timeout: 3000000000
  logs.conf: |
    default-level: debug
    loggers:
      - name: statscollector
        level: info
      - name: vpp.if-state
        level: info
      - name: linux.arp-conf
        level: info
      - name: vpp-rest
        level: info
  grpc.conf: |
    network: unix
    endpoint: /var/run/contiv/cni.sock
    force-socket-removal: true
    permission: 700
  http.conf: |
    endpoint: "0.0.0.0:9999"
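  # Port 9999 is the same endpoint probed by the vswitch readiness/liveness
  # checks in the contiv-vswitch DaemonSets below.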
  bolt.conf: |
    db-path: /var/bolt/bolt.db
    file-mode: 432
    lock-timeout: 0
  telemetry.conf: |
    polling-interval: 30000000000
    disabled: true
  linux-ifplugin.conf: |
    dump-go-routines-count: 5
  linux-l3plugin.conf: |
    dump-go-routines-count: 5
  kvscheduler.conf: |
    record-transaction-history: true
    transaction-history-age-limit: 1440
    permanently-recorded-init-period: 60

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: contiv-cni-cfg
  namespace: kube-system
data:
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  10-contiv-vpp.conflist: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "contiv-cni",
          "grpcServer": "/var/run/contiv/cni.sock",
          "logFile": "/var/run/contiv/cni.log"
        },
        {
          "type": "portmap",
          "capabilities": {
              "portMappings": true
          },
          "externalSetMarkChain": "KUBE-MARK-MASQ"
        }
      ]
    }
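  # After the contiv-cni init container runs, this config should end up on each
  # node as /etc/cni/net.d/10-contiv-vpp.conflist (see the cni-net-dir mount below).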
---

###########################################################
#
# !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
#
###########################################################


###########################################################
#  Components and other resources
###########################################################

# This installs contiv-etcd (the etcd server used by Contiv) on the master node of a Kubernetes cluster.
# In order to dump the contents of etcd, you can use a kubectl exec command similar to this:
#   kubectl exec contiv-etcd-cxqhr -n kube-system -- etcdctl get --endpoints=[127.0.0.1:12379] --prefix="true" ""
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  name: contiv-etcd-amd64
  namespace: kube-system
  labels:
    k8s-app: contiv-etcd
spec:
  selector:
    matchLabels:
      k8s-app: contiv-etcd
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-etcd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: amd64
      hostNetwork: true

      containers:
        - name: contiv-etcd
          image: quay.io/coreos/etcd:v3.3.11
          imagePullPolicy: IfNotPresent
          env:
            - name: CONTIV_ETCD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: ETCDCTL_API
              value: "3"
          command:
            - /bin/sh
          args:
            - -c
            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
              --listen-peer-urls=http://0.0.0.0:12380
          volumeMounts:
            - name: var-etcd
              mountPath: /var/etcd/
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - |
                  echo "$HOST_IP" | grep -q ':'
                  if [ "$?" -eq "0" ];
                  then
                     HOST_IP="[$HOST_IP]"
                  fi
                  etcdctl get --endpoints=$HOST_IP:32379 /
            periodSeconds: 3
            initialDelaySeconds: 20
          resources:
            requests:
              cpu: 100m
      volumes:
        - name: var-etcd
          hostPath:
            path: /var/etcd

---
# This installs contiv-etcd (the etcd server used by Contiv) on the master node of a Kubernetes cluster.
# In order to dump the contents of etcd, you can use a kubectl exec command similar to this:
#   kubectl exec contiv-etcd-cxqhr -n kube-system -- etcdctl get --endpoints=[127.0.0.1:12379] --prefix="true" ""
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  name: contiv-etcd-arm64
  namespace: kube-system
  labels:
    k8s-app: contiv-etcd
spec:
  selector:
    matchLabels:
      k8s-app: contiv-etcd
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-etcd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: arm64
      hostNetwork: true

      containers:
        - name: contiv-etcd
          image: quay.io/coreos/etcd:v3.3.11-arm64
          imagePullPolicy: IfNotPresent
          env:
            - name: CONTIV_ETCD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: ETCDCTL_API
              value: "3"
            - name: ETCD_UNSUPPORTED_ARCH
              value: "arm64"
          command:
            - /bin/sh
          args:
            - -c
            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
              --listen-peer-urls=http://0.0.0.0:12380
          volumeMounts:
            - name: var-etcd
              mountPath: /var/etcd/
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - |
                  echo "$HOST_IP" | grep -q ':'
                  if [ "$?" -eq "0" ];
                  then
                     HOST_IP="[$HOST_IP]"
                  fi
                  etcdctl get --endpoints=$HOST_IP:32379 /
            periodSeconds: 3
            initialDelaySeconds: 20
          resources:
            requests:
              cpu: 100m
      volumes:
        - name: var-etcd
          hostPath:
            path: /var/etcd

---
apiVersion: v1
kind: Service
metadata:
  name: contiv-etcd
  namespace: kube-system
spec:
  type: NodePort
  # Match contiv-etcd DaemonSet.
  selector:
    k8s-app: contiv-etcd
  ports:
    - port: 12379
      nodePort: 32379
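# The NodePort exposes etcd on every node at <node-ip>:32379. With a local
# etcdctl (v3 API), the full keyspace could be listed with, for example:
#   ETCDCTL_API=3 etcdctl --endpoints=<node-ip>:32379 get --prefix ""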

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-ksr-http-cfg
  namespace: kube-system
data:
  http.conf: |
    endpoint: "0.0.0.0:9191"

---
# This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-etcd-cfg
  namespace: kube-system
data:
  etcd.conf: |
    insecure-transport: true
    dial-timeout: 10000000000
    allow-delayed-start: true
    endpoints:
      - "__HOST_IP__:32379"
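  # __HOST_IP__ is a placeholder: the init containers below copy this file and
  # substitute the node's IP address for it (via sed) before the agents read it.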

---

# This config map contains ETCD configuration for connecting to the contiv-etcd defined above with auto compact.
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-etcd-withcompact-cfg
  namespace: kube-system
data:
  etcd.conf: |
    insecure-transport: true
    dial-timeout: 10000000000
    auto-compact: 600000000000
    allow-delayed-start: true
    reconnect-interval: 2000000000
    endpoints:
      - "__HOST_IP__:32379"

---

# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
# It consists of the following containers:
#   - contiv-vswitch container: contains VPP and its management agent
#   - contiv-cni container: installs CNI on the host
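# Once the vswitch is up, VPP on a node can be inspected with the vppctl binary
# that the vpp-init container copies to /usr/local/bin on the host, e.g.:
#   sudo vppctl show interface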
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-vswitch-amd64
  namespace: kube-system
  labels:
    k8s-app: contiv-vswitch
spec:
  selector:
    matchLabels:
      k8s-app: contiv-vswitch
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-vswitch
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      hostNetwork: true
      hostPID: true

      # Init containers are executed before regular containers and must finish
      # successfully before the regular ones are started.
      initContainers:
        # This container installs the Contiv CNI binaries and the CNI network config file on each node.
        - name: contiv-cni
          image: iecedge/cni:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
            - mountPath: /etc/cni/net.d
              name: cni-net-dir
            - mountPath: /cni/cfg
              name: contiv-cni-cfg
            - mountPath: /var/run/contiv
              name: contiv-run

        # This init container extracts/copies the default VPP config to the host and initializes VPP core dumps.
        - name: vpp-init
          image: iecedge/vswitch:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          command:
            - /bin/sh
          args:
            - -c
            - |
              set -eu
              chmod 700 /run/vpp
              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
              fi
              if [ ! -d /var/run/contiv ]; then
                  mkdir /var/run/contiv
              fi
              chmod 700 /var/run/contiv
              rm -f /var/run/contiv/cni.sock
              if ip link show vpp1 >/dev/null 2>&1; then
                   ip link del vpp1
              fi
              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
              sysctl -w debug.exception-trace=1
              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
              ulimit -c unlimited
              echo 2 > /proc/sys/fs/suid_dumpable
              # Replace the localhost IP with the node IP, since the etcd NodePort
              # is not reachable via the localhost IP in certain scenarios.
              cp /etc/etcd/etcd.conf /tmp/etcd.conf
              set +e
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
          resources: {}
          securityContext:
            privileged: true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin
            - name: vpp-cfg
              mountPath: /host/etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: tmp
              mountPath: /tmp
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: core-dumps
              mountPath: /var/contiv/dumps

      containers:
        # Runs contiv-vswitch container on each Kubernetes node.
        # It contains the vSwitch VPP and its management agent.
        - name: contiv-vswitch
          image: iecedge/vswitch:v3.2.1
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          ports:
            # readiness + liveness probe
            - containerPort: 9999
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 15
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 60
          env:
            - name: MICROSERVICE_LABEL
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: GOVPPMUX_NOSOCK
              value: "1"
            - name: CONTIV_CONFIG
              value: "/etc/contiv/contiv.conf"
            - name: CONTROLLER_CONFIG
              value: "/etc/contiv/controller.conf"
            - name: SERVICE_CONFIG
              value: "/etc/contiv/service.conf"
            - name: ETCD_CONFIG
              value: "/tmp/etcd.conf"
            - name: BOLT_CONFIG
              value: "/etc/vpp-agent/bolt.conf"
            # Uncomment to log graph traversal (very verbose):
            # - name: KVSCHED_LOG_GRAPH_WALK
            #   value: "true"
            # Uncomment to verify effect of every transaction:
            # - name: KVSCHED_VERIFY_MODE
            #   value: "true"
            - name: TELEMETRY_CONFIG
              value: "/etc/vpp-agent/telemetry.conf"
            - name: GOVPP_CONFIG
              value: "/etc/vpp-agent/govpp.conf"
            - name: LOGS_CONFIG
              value: "/etc/vpp-agent/logs.conf"
            - name: HTTP_CONFIG
              value: "/etc/vpp-agent/http.conf"
            - name: GRPC_CONFIG
              value: "/etc/vpp-agent/grpc.conf"
            - name: LINUX_IFPLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-ifplugin.conf"
            - name: LINUX_L3PLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-l3plugin.conf"
            - name: KVSCHEDULER_CONFIG
              value: "/etc/vpp-agent/kvscheduler.conf"
          volumeMounts:
            - name: var-bolt
              mountPath: /var/bolt
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: vpp-cfg
              mountPath: /etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: dev
              mountPath: /dev
            - name: sys-bus-pci
              mountPath: /sys/bus/pci
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: contiv-agent-cfg
              mountPath: /etc/contiv
            - name: vpp-agent-cfg
              mountPath: /etc/vpp-agent
            - name: tmp
              mountPath: /tmp
            - name: core-dumps
              mountPath: /var/contiv/dumps
            - name: docker-socket
              mountPath: /var/run/docker.sock
          resources:
            requests:
              cpu: 250m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # VPP startup config folder.
        - name: vpp-cfg
          hostPath:
            path: /etc/vpp
        # To install vppctl.
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication
        # with VPP (/dev/shm)
        - name: dev
          hostPath:
            path: /dev
        - name: shm
          hostPath:
            path: /dev/shm
        # /sys/bus/pci is required for binding PCI devices to specific drivers
        - name: sys-bus-pci
          hostPath:
            path: /sys/bus/pci
        # For CLI unix socket.
        - name: vpp-run
          hostPath:
            path: /run/vpp
        # For CNI / STN unix domain socket
        - name: contiv-run
          hostPath:
            path: /var/run/contiv
        # Used to configure contiv agent.
        - name: contiv-agent-cfg
          configMap:
            name: contiv-agent-cfg
        # Used to configure vpp agent.
        - name: vpp-agent-cfg
          configMap:
            name: vpp-agent-cfg
        # Used for vswitch core dumps
        - name: core-dumps
          hostPath:
            path: /var/contiv/dumps
        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
        - name: tmp
          emptyDir:
            medium: Memory
        # persisted bolt data
        - name: var-bolt
          hostPath:
            path: /var/bolt
        - name: docker-socket
          hostPath:
            path: /var/run/docker.sock
        # CNI config
        - name: contiv-cni-cfg
          configMap:
            name: contiv-cni-cfg

---
# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
# It consists of the following containers:
#   - contiv-vswitch container: contains VPP and its management agent
#   - contiv-cni container: installs CNI on the host
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-vswitch-arm64
  namespace: kube-system
  labels:
    k8s-app: contiv-vswitch
spec:
  selector:
    matchLabels:
      k8s-app: contiv-vswitch
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-vswitch
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      nodeSelector:
        beta.kubernetes.io/arch: arm64
      hostNetwork: true
      hostPID: true

      # Init containers are executed before regular containers and must finish
      # successfully before the regular ones are started.
      initContainers:
        # This container installs the Contiv CNI binaries and the CNI network config file on each node.
        - name: contiv-cni
          image: iecedge/cni-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
            - mountPath: /etc/cni/net.d
              name: cni-net-dir
            - mountPath: /cni/cfg
              name: contiv-cni-cfg
            - mountPath: /var/run/contiv
              name: contiv-run

        # This init container extracts/copies the default VPP config to the host and initializes VPP core dumps.
        - name: vpp-init
          image: iecedge/vswitch-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          command:
            - /bin/sh
          args:
            - -c
            - |
              set -eu
              chmod 700 /run/vpp
              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
              fi
              if [ ! -d /var/run/contiv ]; then
                  mkdir /var/run/contiv
              fi
              chmod 700 /var/run/contiv
              rm -f /var/run/contiv/cni.sock
              if ip link show vpp1 >/dev/null 2>&1; then
                   ip link del vpp1
              fi
              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
              sysctl -w debug.exception-trace=1
              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
              ulimit -c unlimited
              echo 2 > /proc/sys/fs/suid_dumpable
              # Replace the localhost IP with the node IP, since the etcd NodePort
              # is not reachable via the localhost IP in certain scenarios.
              cp /etc/etcd/etcd.conf /tmp/etcd.conf
              set +e
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
          resources: {}
          securityContext:
            privileged: true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin
            - name: vpp-cfg
              mountPath: /host/etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: tmp
              mountPath: /tmp
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: core-dumps
              mountPath: /var/contiv/dumps

      containers:
        # Runs contiv-vswitch container on each Kubernetes node.
        # It contains the vSwitch VPP and its management agent.
        - name: contiv-vswitch
          image: iecedge/vswitch-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          ports:
            # readiness + liveness probe
            - containerPort: 9999
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 15
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 60
          env:
            - name: MICROSERVICE_LABEL
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: GOVPPMUX_NOSOCK
              value: "1"
            - name: CONTIV_CONFIG
              value: "/etc/contiv/contiv.conf"
            - name: CONTROLLER_CONFIG
              value: "/etc/contiv/controller.conf"
            - name: SERVICE_CONFIG
              value: "/etc/contiv/service.conf"
            - name: ETCD_CONFIG
              value: "/tmp/etcd.conf"
            - name: BOLT_CONFIG
              value: "/etc/vpp-agent/bolt.conf"
            # Uncomment to log graph traversal (very verbose):
            # - name: KVSCHED_LOG_GRAPH_WALK
            #   value: "true"
            # Uncomment to verify effect of every transaction:
            # - name: KVSCHED_VERIFY_MODE
            #   value: "true"
            - name: TELEMETRY_CONFIG
              value: "/etc/vpp-agent/telemetry.conf"
            - name: GOVPP_CONFIG
              value: "/etc/vpp-agent/govpp.conf"
            - name: LOGS_CONFIG
              value: "/etc/vpp-agent/logs.conf"
            - name: HTTP_CONFIG
              value: "/etc/vpp-agent/http.conf"
            - name: GRPC_CONFIG
              value: "/etc/vpp-agent/grpc.conf"
            - name: LINUX_IFPLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-ifplugin.conf"
            - name: LINUX_L3PLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-l3plugin.conf"
            - name: KVSCHEDULER_CONFIG
              value: "/etc/vpp-agent/kvscheduler.conf"
          volumeMounts:
            - name: var-bolt
              mountPath: /var/bolt
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: vpp-cfg
              mountPath: /etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: dev
              mountPath: /dev
            - name: sys-bus-pci
              mountPath: /sys/bus/pci
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: contiv-agent-cfg
              mountPath: /etc/contiv
            - name: vpp-agent-cfg
              mountPath: /etc/vpp-agent
            - name: tmp
              mountPath: /tmp
            - name: core-dumps
              mountPath: /var/contiv/dumps
            - name: docker-socket
              mountPath: /var/run/docker.sock
          resources:
            requests:
              cpu: 250m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # VPP startup config folder.
        - name: vpp-cfg
          hostPath:
            path: /etc/vpp
        # To install vppctl.
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with
        # VPP (/dev/shm)
        - name: dev
          hostPath:
            path: /dev
        - name: shm
          hostPath:
            path: /dev/shm
        # /sys/bus/pci is required for binding PCI devices to specific drivers
        - name: sys-bus-pci
          hostPath:
            path: /sys/bus/pci
        # For CLI unix socket.
        - name: vpp-run
          hostPath:
            path: /run/vpp
        # For CNI / STN unix domain socket
        - name: contiv-run
          hostPath:
            path: /var/run/contiv
        # Used to configure contiv agent.
        - name: contiv-agent-cfg
          configMap:
            name: contiv-agent-cfg
        # Used to configure vpp agent.
        - name: vpp-agent-cfg
          configMap:
            name: vpp-agent-cfg
        # Used for vswitch core dumps
        - name: core-dumps
          hostPath:
            path: /var/contiv/dumps
        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
        - name: tmp
          emptyDir:
            medium: Memory
        # persisted bolt data
        - name: var-bolt
          hostPath:
            path: /var/bolt
        - name: docker-socket
          hostPath:
            path: /var/run/docker.sock
        # CNI config
        - name: contiv-cni-cfg
          configMap:
            name: contiv-cni-cfg

---
# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-ksr-amd64
  namespace: kube-system
  labels:
    k8s-app: contiv-ksr
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-ksr
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: amd64
      hostNetwork: true
      # This grants the required permissions to contiv-ksr.
      serviceAccountName: contiv-ksr

      initContainers:
        # This init container waits until etcd is started.
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd


      containers:
        - name: contiv-ksr
          image: iecedge/ksr:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m
      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-withcompact-cfg
        - name: tmp-cfg
          emptyDir: {}
        - name: http-cfg
          configMap:
            name: contiv-ksr-http-cfg

---
# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-ksr-arm64
  namespace: kube-system
  labels:
    k8s-app: contiv-ksr
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-ksr
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: arm64
      hostNetwork: true
      # This grants the required permissions to contiv-ksr.
      serviceAccountName: contiv-ksr

      initContainers:
        # This init container waits until etcd is started.
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: arm64v8/busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd


      containers:
        - name: contiv-ksr
          image: iecedge/ksr-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m
      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-withcompact-cfg
        - name: tmp-cfg
          emptyDir: {}
        - name: http-cfg
          configMap:
            name: contiv-ksr-http-cfg

---

# This cluster role defines a set of permissions required for contiv-ksr.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: contiv-ksr
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - services
      - endpoints
      - nodes
    verbs:
      - watch
      - list

---

# This defines a service account for contiv-ksr.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-ksr
  namespace: kube-system

---

# This binds the contiv-ksr cluster role with the contiv-ksr service account.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: contiv-ksr
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-ksr
subjects:
  - kind: ServiceAccount
    name: contiv-ksr
    namespace: kube-system

---

# This installs the contiv-crd on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-crd-amd64
  namespace: kube-system
  labels:
    k8s-app: contiv-crd
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-crd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: amd64
      hostNetwork: true
      # This grants the required permissions to contiv-crd.
      serviceAccountName: contiv-crd

      initContainers:
        # This init container waits until etcd is started.
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd

        # This init container copies the contiv-netctl tool to the host.
        - name: netctl-init
          image: iecedge/crd:v3.2.1
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              echo '#!/bin/sh
              kubectl get pods -n kube-system | \
                grep contiv-crd | \
                cut -d " " -f 1 | \
                xargs -I{} kubectl exec -n kube-system {} \
                /contiv-netctl "$@"' \
              > /host/usr/local/bin/contiv-netctl || true
              chmod +x /host/usr/local/bin/contiv-netctl || true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin
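        # The installed wrapper can then be run on the host; available subcommands
        # depend on the contiv-netctl version, e.g. (assumed invocation): contiv-netctl nodes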

      containers:
        - name: contiv-crd
          image: iecedge/crd:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
            - name: HTTP_CLIENT_CONFIG
              value: "/etc/http/http.client.conf"
            - name: CONTIV_CRD_VALIDATE_INTERVAL
              value: "5"
            - name: CONTIV_CRD_VALIDATE_STATE
              value: "SB"
            - name: DISABLE_NETCTL_REST
              value: "true"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        - name: http-cfg
          configMap:
            name: contiv-crd-http-cfg
        - name: tmp-cfg
          emptyDir: {}
---
# This installs the contiv-crd on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-crd-arm64
  namespace: kube-system
  labels:
    k8s-app: contiv-crd
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-crd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
        beta.kubernetes.io/arch: arm64
      hostNetwork: true
      # This grants the required permissions to contiv-crd.
      serviceAccountName: contiv-crd

      initContainers:
        # This init container waits until etcd is started.
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: arm64v8/busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd

        # This init container copies the contiv-netctl tool to the host.
        - name: netctl-init
          image: iecedge/crd-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              echo '#!/bin/sh
              kubectl get pods -n kube-system | \
                grep contiv-crd | \
                cut -d " " -f 1 | \
                xargs -I{} kubectl exec -n kube-system {} \
                /contiv-netctl "$@"' \
              > /host/usr/local/bin/contiv-netctl || true
              chmod +x /host/usr/local/bin/contiv-netctl || true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin

      containers:
        - name: contiv-crd
          image: iecedge/crd-arm64:v3.2.1
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
            - name: HTTP_CLIENT_CONFIG
              value: "/etc/http/http.client.conf"
            - name: CONTIV_CRD_VALIDATE_INTERVAL
              value: "5"
            - name: CONTIV_CRD_VALIDATE_STATE
              value: "SB"
            - name: DISABLE_NETCTL_REST
              value: "true"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        - name: http-cfg
          configMap:
            name: contiv-crd-http-cfg
        - name: tmp-cfg
          emptyDir: {}
---

# This cluster role defines a set of permissions required for contiv-crd.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: contiv-crd
  namespace: kube-system
rules:
  - apiGroups:
      - apiextensions.k8s.io
      - nodeconfig.contiv.vpp
      - telemetry.contiv.vpp
    resources:
      - customresourcedefinitions
      - telemetryreports
      - nodeconfigs
    verbs:
      - "*"

---

# This defines a service account for contiv-crd.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-crd
  namespace: kube-system

---

# This binds the contiv-crd cluster role with the contiv-crd service account.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: contiv-crd
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-crd
subjects:
  - kind: ServiceAccount
    name: contiv-crd
    namespace: kube-system

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-crd-http-cfg
  namespace: kube-system
data:
  http.conf: |
    endpoint: "0.0.0.0:9090"
  http.client.conf: |
    port: 9999
    use-https: false
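
# After applying this file, the deployment can be verified with, e.g.:
#   kubectl get pods -n kube-system -o wide | grep contiv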