# Upstream documentation for IEC release 2
# [iec.git] src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml
---
# Source: contiv-vpp/templates/vpp.yaml
# Contiv-VPP deployment YAML file. This deploys Contiv VPP networking on a Kubernetes cluster.
# The deployment consists of the following components:
#   - contiv-etcd - deployed on k8s master
#   - contiv-vswitch - deployed on each k8s node
#   - contiv-ksr - deployed on k8s master

###########################################################
#  Configuration
###########################################################

# This config map contains contiv-agent configuration. The most important part is the ipamConfig,
# which may be updated in case the default IPAM settings do not match your needs.
# nodeConfig may be used in case your nodes have more than 1 VPP interface. In that case, one
# of them needs to be marked as the main inter-node interface, and the rest of them can be
# configured with any IP addresses (the IPs cannot conflict with the main IPAM config).
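# As a sketch of the multi-interface case described above: a node with a second, non-main VPP
# interface could extend its nodeConfig entry roughly as follows. The otherVppInterfaces key
# name, the interface name and the address below are illustrative assumptions, not values used
# by this deployment:
#
#   nodeConfig:
#   - nodeName: example-node
#     mainVppInterface:
#       interfaceName: mv-ppio-0/0
#     otherVppInterfaces:
#     - interfaceName: mv-ppio-0/1
#       ip: 192.168.50.1/24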
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-agent-cfg
  namespace: kube-system
data:
  contiv.conf: |-
    nodeToNodeTransport: vxlan
    useSRv6ForServices: false
    useTAPInterfaces: true
    tapInterfaceVersion: 2
    tapv2RxRingSize: 256
    tapv2TxRingSize: 256
    enableGSO: false
    tcpChecksumOffloadDisabled: true
    STNVersion: 2
    natExternalTraffic: true
    mtuSize: 1450
    scanIPNeighbors: true
    ipNeighborScanInterval: 1
    ipNeighborStaleThreshold: 4
    enablePacketTrace: false
    routeServiceCIDRToVPP: false
    crdNodeConfigurationDisabled: true
    ipamConfig:
      nodeInterconnectDHCP: false
      nodeInterconnectCIDR: 192.168.16.0/24
      podSubnetCIDR: 10.1.0.0/16
      podSubnetOneNodePrefixLen: 24
      vppHostSubnetCIDR: 172.30.0.0/16
      vppHostSubnetOneNodePrefixLen: 24
      vxlanCIDR: 192.168.30.0/24
      srv6:
        servicePolicyBSIDSubnetCIDR: 8fff::/16
        servicePodLocalSIDSubnetCIDR: 9300::/16
        serviceHostLocalSIDSubnetCIDR: 9300::/16
        serviceNodeLocalSIDSubnetCIDR: 9000::/16
        nodeToNodePodLocalSIDSubnetCIDR: 9501::/16
        nodeToNodeHostLocalSIDSubnetCIDR: 9500::/16
        nodeToNodePodPolicySIDSubnetCIDR: 8501::/16
        nodeToNodeHostPolicySIDSubnetCIDR: 8500::/16
    nodeConfig:
    - nodeName: net-arm-mcbin-iec
      mainVppInterface:
        interfaceName: mv-ppio-0/0
    - nodeName: net-arm-mcbin-iec-1
      mainVppInterface:
        interfaceName: mv-ppio-0/0
  controller.conf: |
    enableRetry: true
    delayRetry: 1000000000
    maxRetryAttempts: 3
    enableExpBackoffRetry: true
    delayLocalResync: 5000000000
    startupResyncDeadline: 30000000000
    enablePeriodicHealing: false
    periodicHealingInterval: 30000000000
    delayAfterErrorHealing: 5000000000
    remoteDBProbingInterval: 3000000000
    recordEventHistory: true
    eventHistoryAgeLimit: 60
    permanentlyRecordedInitPeriod: 10
  service.conf: |
    cleanupIdleNATSessions: true
    tcpNATSessionTimeout: 180
    otherNATSessionTimeout: 5
    serviceLocalEndpointWeight: 1
    disableNATVirtualReassembly: false

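# A quick worked example of the IPAM settings above (the exact per-node subnets depend on the
# node IDs assigned at runtime, so the values here are illustrative): with podSubnetCIDR
# 10.1.0.0/16 and podSubnetOneNodePrefixLen 24, each node is carved one /24 out of the /16,
# e.g. roughly 10.1.1.0/24 for the first node and 10.1.2.0/24 for the second.
# vppHostSubnetCIDR 172.30.0.0/16 is split the same way (172.30.1.0/24, 172.30.2.0/24, ...),
# while nodeInterconnectCIDR and vxlanCIDR provide one address per node for the main VPP
# interface and the VXLAN BVI loopback, respectively.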
---

apiVersion: v1
kind: ConfigMap
metadata:
  name: vpp-agent-cfg
  namespace: kube-system
data:
  govpp.conf: |
    health-check-probe-interval: 3000000000
    health-check-reply-timeout: 500000000
    health-check-threshold: 3
    reply-timeout: 3000000000
  logs.conf: |
    default-level: debug
    loggers:
      - name: statscollector
        level: info
      - name: vpp.if-state
        level: info
      - name: linux.arp-conf
        level: info
      - name: vpp-rest
        level: info
  grpc.conf: |
    network: unix
    endpoint: /var/run/contiv/cni.sock
    force-socket-removal: true
    permission: 700
  http.conf: |
    endpoint: "0.0.0.0:9999"
  bolt.conf: |
    db-path: /var/bolt/bolt.db
    file-mode: 432
    lock-timeout: 0
  telemetry.conf: |
    polling-interval: 30000000000
    disabled: true
  linux-ifplugin.conf: |
    dump-go-routines-count: 5
  linux-l3plugin.conf: |
    dump-go-routines-count: 5
  kvscheduler.conf: |
    record-transaction-history: true
    transaction-history-age-limit: 60
    permanently-recorded-init-period: 10

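# Informational note on the values above: the interval and timeout fields in govpp.conf and
# telemetry.conf are interpreted as nanosecond counts, so e.g. 3000000000 is 3 s and
# 30000000000 is 30 s. The bolt.conf file-mode of 432 is the decimal form of octal 0660
# (owner and group read-write).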
---

kind: ConfigMap
apiVersion: v1
metadata:
  name: contiv-cni-cfg
  namespace: kube-system
data:
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  10-contiv-vpp.conflist: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.1",
      "plugins": [
        {
          "type": "contiv-cni",
          "grpcServer": "/var/run/contiv/cni.sock",
          "logFile": "/var/run/contiv/cni.log"
        },
        {
          "type": "portmap",
          "capabilities": {
              "portMappings": true
          },
          "externalSetMarkChain": "KUBE-MARK-MASQ"
        }
      ]
    }
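# The conflist above is what ends up on each node: the contiv-cni init container of the
# contiv-vswitch DaemonSet below installs it together with the CNI binaries. A minimal sanity
# check, assuming the default CNI configuration directory, is to look for the file on a node:
#
#   ls /etc/cni/net.d/10-contiv-vpp.conflist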
---

###########################################################
#
# !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
#
###########################################################


###########################################################
#  Components and other resources
###########################################################

# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
# In order to dump the contents of ETCD, you can use a kubectl exec command similar to this:
#   kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
apiVersion: apps/v1beta2
kind: StatefulSet
metadata:
  name: contiv-etcd
  namespace: kube-system
  labels:
    k8s-app: contiv-etcd
spec:
  serviceName: contiv-etcd
  selector:
    matchLabels:
      k8s-app: contiv-etcd
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-etcd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true

      containers:
        - name: contiv-etcd
          image: quay.io/coreos/etcd:v3.3.11-arm64
          imagePullPolicy: IfNotPresent
          env:
            - name: CONTIV_ETCD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: ETCDCTL_API
              value: "3"
            - name: ETCD_UNSUPPORTED_ARCH
              value: "arm64"
          command:
            - /bin/sh
          args:
            - -c
            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
              --listen-peer-urls=http://0.0.0.0:12380
          volumeMounts:
            - name: var-etcd
              mountPath: /var/etcd/
          livenessProbe:
            exec:
              command:
                - /bin/sh
                - -c
                - |
                  echo "$HOST_IP" | grep -q ':'
                  if [ "$?" -eq "0" ];
                  then
                     HOST_IP="[$HOST_IP]"
                  fi
                  etcdctl get --endpoints=$HOST_IP:32379 /
            periodSeconds: 3
            initialDelaySeconds: 20
          resources:
            requests:
              cpu: 100m
      volumes:
        - name: var-etcd
          hostPath:
            path: /var/etcd

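# Informational notes on the StatefulSet above: etcd data is kept on the master's /var/etcd
# hostPath, so it survives pod restarts, and ETCD_UNSUPPORTED_ARCH=arm64 is required for the
# upstream etcd image to start on arm64. The liveness probe wraps HOST_IP in brackets when it
# is an IPv6 address before calling etcdctl against the 32379 NodePort.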
---

apiVersion: v1
kind: Service
metadata:
  name: contiv-etcd
  namespace: kube-system
spec:
  type: NodePort
  # Match the contiv-etcd StatefulSet.
  selector:
    k8s-app: contiv-etcd
  ports:
    - port: 12379
      nodePort: 32379
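# The Service above exposes contiv-etcd's client port 12379 on every node as NodePort 32379;
# this is the port the other Contiv components below reach etcd on (see the
# "__HOST_IP__:32379" endpoints in the etcd.conf ConfigMaps).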
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-ksr-http-cfg
  namespace: kube-system
data:
  http.conf: |
    endpoint: "0.0.0.0:9191"

---
# This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-etcd-cfg
  namespace: kube-system
data:
  etcd.conf: |
    dial-timeout: 10000000000
    allow-delayed-start: true
    insecure-transport: true
    endpoints:
      - "__HOST_IP__:32379"

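# __HOST_IP__ above is a placeholder: the init containers of contiv-vswitch, contiv-ksr and
# contiv-crd copy this file and substitute the node's own IP with sed before their agents read
# it. On a node whose address were, say, 192.0.2.10 (an illustrative value), the copied file
# would end with:
#
#   endpoints:
#     - "192.0.2.10:32379"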
---

# This config map contains ETCD configuration for connecting to the contiv-etcd defined above, with auto-compaction enabled.
apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-etcd-withcompact-cfg
  namespace: kube-system
data:
  etcd.conf: |
    insecure-transport: true
    dial-timeout: 10000000000
    auto-compact: 600000000000
    allow-delayed-start: true
    reconnect-interval: 2000000000
    endpoints:
      - "__HOST_IP__:32379"

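# This variant adds auto-compact (600000000000 ns = 10 minutes) and a 2 s reconnect interval;
# it is the one mounted by contiv-ksr below, which continuously reflects Kubernetes state into
# etcd and therefore benefits from periodic compaction.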
---

# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
# It consists of the following containers:
#   - contiv-vswitch container: contains VPP and its management agent
#   - contiv-cni container: installs CNI on the host
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-vswitch
  namespace: kube-system
  labels:
    k8s-app: contiv-vswitch
spec:
  selector:
    matchLabels:
      k8s-app: contiv-vswitch
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-vswitch
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      hostNetwork: true
      hostPID: true

      # Init containers are executed before regular containers and must finish successfully
      # before the regular ones are started.
      initContainers:
        # This container installs the Contiv CNI binaries and CNI network config file on each node.
        - name: contiv-cni
          image: iecedge/cni-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          env:
            - name: SLEEP
              value: "false"
          volumeMounts:
            - mountPath: /opt/cni/bin
              name: cni-bin-dir
            - mountPath: /etc/cni/net.d
              name: cni-net-dir
            - mountPath: /cni/cfg
              name: contiv-cni-cfg
            - mountPath: /var/run/contiv
              name: contiv-run

        # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
        - name: vpp-init
          image: iecedge/vswitch-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          command:
            - /bin/sh
          args:
            - -c
            - |
              set -eu
              chmod 700 /run/vpp
              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
              fi
              if [ ! -d /var/run/contiv ]; then
                  mkdir /var/run/contiv
              fi
              chmod 700 /var/run/contiv
              rm -f /var/run/contiv/cni.sock
              if ip link show vpp1 >/dev/null 2>&1; then
                   ip link del vpp1
              fi
              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
              sysctl -w debug.exception-trace=1
              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
              ulimit -c unlimited
              echo 2 > /proc/sys/fs/suid_dumpable
              # replace localhost IP by node IP since node port doesn't work
              # on localhost IP in a certain scenario
              cp /etc/etcd/etcd.conf /tmp/etcd.conf
              set +e
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
          resources: {}
          securityContext:
            privileged: true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin
            - name: vpp-cfg
              mountPath: /host/etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: tmp
              mountPath: /tmp
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: core-dumps
              mountPath: /var/contiv/dumps

      containers:
        # Runs contiv-vswitch container on each Kubernetes node.
        # It contains the vSwitch VPP and its management agent.
        - name: contiv-vswitch
          image: iecedge/vswitch-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          securityContext:
            privileged: true
          ports:
            # readiness + liveness probe
            - containerPort: 9999
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 15
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9999
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 60
          env:
            - name: MICROSERVICE_LABEL
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: GOVPPMUX_NOSOCK
              value: "1"
            - name: CONTIV_CONFIG
              value: "/etc/contiv/contiv.conf"
            - name: CONTROLLER_CONFIG
              value: "/etc/contiv/controller.conf"
            - name: SERVICE_CONFIG
              value: "/etc/contiv/service.conf"
            - name: ETCD_CONFIG
              value: "/tmp/etcd.conf"
            - name: BOLT_CONFIG
              value: "/etc/vpp-agent/bolt.conf"
            # Uncomment to log graph traversal (very verbose):
            # - name: KVSCHED_LOG_GRAPH_WALK
            #   value: "true"
            # Uncomment to verify effect of every transaction:
            # - name: KVSCHED_VERIFY_MODE
            #   value: "true"
            - name: TELEMETRY_CONFIG
              value: "/etc/vpp-agent/telemetry.conf"
            - name: GOVPP_CONFIG
              value: "/etc/vpp-agent/govpp.conf"
            - name: LOGS_CONFIG
              value: "/etc/vpp-agent/logs.conf"
            - name: HTTP_CONFIG
              value: "/etc/vpp-agent/http.conf"
            - name: GRPC_CONFIG
              value: "/etc/vpp-agent/grpc.conf"
            - name: LINUX_IFPLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-ifplugin.conf"
            - name: LINUX_L3PLUGIN_CONFIG
              value: "/etc/vpp-agent/linux-l3plugin.conf"
            - name: KVSCHEDULER_CONFIG
              value: "/etc/vpp-agent/kvscheduler.conf"
            - name: DISABLE_INTERFACE_STATS
              value: "y"
          volumeMounts:
            - name: var-bolt
              mountPath: /var/bolt
            - name: etcd-cfg
              mountPath: /etc/etcd
            - name: vpp-cfg
              mountPath: /etc/vpp
            - name: shm
              mountPath: /dev/shm
            - name: dev
              mountPath: /dev
            - name: sys-bus-pci
              mountPath: /sys/bus/pci
            - name: vpp-run
              mountPath: /run/vpp
            - name: contiv-run
              mountPath: /var/run/contiv
            - name: contiv-agent-cfg
              mountPath: /etc/contiv
            - name: vpp-agent-cfg
              mountPath: /etc/vpp-agent
            - name: tmp
              mountPath: /tmp
            - name: core-dumps
              mountPath: /var/contiv/dumps
            - name: docker-socket
              mountPath: /var/run/docker.sock
            - name: kubelet-api
              mountPath: /var/lib/kubelet
          resources:
            limits:
              hugepages-2Mi: 512Mi
              memory: 512Mi
            requests:
              cpu: 250m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # VPP startup config folder.
        - name: vpp-cfg
          hostPath:
            path: /etc/vpp
        # To install vppctl.
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with VPP
        # (/dev/shm)
        - name: dev
          hostPath:
            path: /dev
        - name: shm
          hostPath:
            path: /dev/shm
        # /sys/bus/pci is required for binding PCI devices to specific drivers
        - name: sys-bus-pci
          hostPath:
            path: /sys/bus/pci
        # For CLI unix socket.
        - name: vpp-run
          hostPath:
            path: /run/vpp
        # For CNI / STN unix domain socket
        - name: contiv-run
          hostPath:
            path: /var/run/contiv
        # Used to configure contiv agent.
        - name: contiv-agent-cfg
          configMap:
            name: contiv-agent-cfg
        # Used to configure vpp agent.
        - name: vpp-agent-cfg
          configMap:
            name: vpp-agent-cfg
        # Used for vswitch core dumps
        - name: core-dumps
          hostPath:
            path: /var/contiv/dumps
        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
        - name: tmp
          emptyDir:
            medium: Memory
        # persisted bolt data
        - name: var-bolt
          hostPath:
            path: /var/bolt
        - name: docker-socket
          hostPath:
            path: /var/run/docker.sock
        # CNI config
        - name: contiv-cni-cfg
          configMap:
            name: contiv-cni-cfg
        # kubelet api dir
        - name: kubelet-api
          hostPath:
            path: /var/lib/kubelet

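# A couple of hedged operational notes on the DaemonSet above: the hugepages-2Mi limit assumes
# 2 MiB hugepages have been pre-allocated on each node (e.g. via the vm.nr_hugepages sysctl),
# otherwise the pod cannot be scheduled; and vpp-init copies vppctl onto the host, so once the
# pod is running the data plane can be checked roughly like this:
#
#   kubectl get pods -n kube-system -l k8s-app=contiv-vswitch -o wide
#   vppctl show interface    # run on the node itself, using the copied binary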
---

# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-ksr
  namespace: kube-system
  labels:
    k8s-app: contiv-ksr
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-ksr
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      # This grants the required permissions to contiv-ksr.
      serviceAccountName: contiv-ksr

      initContainers:
        # This init container waits until etcd is started
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: arm64v8/busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd


      containers:
        - name: contiv-ksr
          image: iecedge/ksr-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9191
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m
      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-withcompact-cfg
        - name: tmp-cfg
          emptyDir: {}
        - name: http-cfg
          configMap:
            name: contiv-ksr-http-cfg

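# Since the pod runs with hostNetwork and the KSR HTTP endpoint is 0.0.0.0:9191 (see
# contiv-ksr-http-cfg above), a quick health check from the master node is, for example:
#
#   curl http://127.0.0.1:9191/readiness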
---

# This cluster role defines a set of permissions required for contiv-ksr.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: contiv-ksr
  namespace: kube-system
rules:
  - apiGroups:
      - ""
      - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - services
      - endpoints
      - nodes
    verbs:
      - watch
      - list

---

# This defines a service account for contiv-ksr.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-ksr
  namespace: kube-system

---

# This binds the contiv-ksr cluster role with contiv-ksr service account.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: contiv-ksr
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-ksr
subjects:
  - kind: ServiceAccount
    name: contiv-ksr
    namespace: kube-system

---

# This installs the contiv-crd on the master node in a Kubernetes cluster.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: contiv-crd
  namespace: kube-system
  labels:
    k8s-app: contiv-crd
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: contiv-crd
      annotations:
        # Marks this pod as a critical add-on.
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
        - key: ''
          operator: Exists
          effect: ''
        # This likely isn't needed due to the above wildcard, but keep it in for now.
        - key: CriticalAddonsOnly
          operator: Exists
      # Only run this pod on the master.
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      # This grants the required permissions to contiv-crd.
      serviceAccountName: contiv-crd

      initContainers:
        # This init container waits until etcd is started
        - name: wait-foretcd
          env:
            - name: ETCDPORT
              value: "32379"
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          image: arm64v8/busybox:1.29.3
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
              echo "$HOST_IP" | grep -q ':'
              if [ "$?" -eq "0" ];
              then
                 HOST_IP="[$HOST_IP]"
              fi
              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: etcd-cfg
              mountPath: /etc/etcd

        # This init container copies contiv-netctl tool to the host.
        - name: netctl-init
          image: iecedge/crd-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          command:
            - /bin/sh
          args:
            - -c
            - |
              echo '#!/bin/sh
              kubectl get pods -n kube-system | \
                grep contiv-crd | \
                cut -d " " -f 1 | \
                xargs -I{} kubectl exec -n kube-system {} \
                /contiv-netctl "$@"' \
              > /host/usr/local/bin/contiv-netctl || true
              chmod +x /host/usr/local/bin/contiv-netctl || true
          volumeMounts:
            - name: usr-local-bin
              mountPath: /host/usr/local/bin

      containers:
        - name: contiv-crd
          image: iecedge/crd-arm64:v3.2.1-macbin
          imagePullPolicy: IfNotPresent
          env:
            - name: ETCD_CONFIG
              value: "/tmp/cfg/etcd.conf"
            - name: HTTP_CONFIG
              value: "/etc/http/http.conf"
            - name: HTTP_CLIENT_CONFIG
              value: "/etc/http/http.client.conf"
            - name: CONTIV_CRD_VALIDATE_INTERVAL
              value: "5"
            - name: CONTIV_CRD_VALIDATE_STATE
              value: "SB"
            - name: DISABLE_NETCTL_REST
              value: "true"
          volumeMounts:
            - name: tmp-cfg
              mountPath: /tmp/cfg
            - name: http-cfg
              mountPath: /etc/http
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 10
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9090
            periodSeconds: 3
            timeoutSeconds: 2
            failureThreshold: 3
            initialDelaySeconds: 30
          resources:
            requests:
              cpu: 100m

      volumes:
        # Used to connect to contiv-etcd.
        - name: etcd-cfg
          configMap:
            name: contiv-etcd-cfg
        - name: usr-local-bin
          hostPath:
            path: /usr/local/bin
        - name: http-cfg
          configMap:
            name: contiv-crd-http-cfg
        - name: tmp-cfg
          emptyDir: {}
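# The netctl-init container above writes a small wrapper to /usr/local/bin/contiv-netctl on the
# master: it finds a contiv-crd pod, runs /contiv-netctl inside it via kubectl exec, and forwards
# any arguments, so "contiv-netctl <subcommand>" on the master behaves like running the tool
# inside the pod.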
---

# This cluster role defines a set of permissions required for contiv-crd.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: contiv-crd
  namespace: kube-system
rules:
  - apiGroups:
      - apiextensions.k8s.io
      - nodeconfig.contiv.vpp
      - telemetry.contiv.vpp
      - contivpp.io
    resources:
      - customresourcedefinitions
      - telemetryreports
      - nodeconfigs
      - customnetworks
      - servicefunctionchains
    verbs:
      - "*"

---

# This defines a service account for contiv-crd.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: contiv-crd
  namespace: kube-system

---

# This binds the contiv-crd cluster role with contiv-crd service account.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: contiv-crd
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: contiv-crd
subjects:
  - kind: ServiceAccount
    name: contiv-crd
    namespace: kube-system

---

apiVersion: v1
kind: ConfigMap
metadata:
  name: contiv-crd-http-cfg
  namespace: kube-system
data:
  http.conf: |
    endpoint: "0.0.0.0:9090"
  http.client.conf: |
    port: 9999
    use-https: false
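# http.conf above is the REST endpoint served by contiv-crd itself (port 9090, used by its
# readiness and liveness probes), while http.client.conf points the CRD's HTTP client at
# port 9999, which matches the endpoint served by the contiv-vswitch agent (see http.conf in
# the vpp-agent-cfg ConfigMap).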