2 # Source: contiv-vpp/templates/vpp.yaml
3 # Contiv-VPP deployment YAML file. This deploys Contiv VPP networking on a Kubernetes cluster.
4 # The deployment consists of the following components:
5 # - contiv-etcd - deployed on k8s master
6 # - contiv-vswitch - deployed on each k8s node
7 # - contiv-ksr - deployed on k8s master
9 ###########################################################
11 ###########################################################
13 # This config map contains contiv-agent configuration. The most important part is the ipamConfig,
14 # which may be updated in case the default IPAM settings do not match your needs.
15 # nodeConfig may be used in case your nodes have more than 1 VPP interface. In that case, one
16 # of them needs to be marked as the main inter-node interface, and the rest of them can be
17 # configured with any IP addresses (the IPs cannot conflict with the main IPAM config).
21 name: contiv-agent-cfg
22 namespace: kube-system
26 useTAPInterfaces: true
27 tapInterfaceVersion: 2
30 tcpChecksumOffloadDisabled: true
32 natExternalTraffic: true
35 ipNeighborScanInterval: 1
36 ipNeighborStaleThreshold: 4
37 enablePacketTrace: false
38 routeServiceCIDRToVPP: false
39 crdNodeConfigurationDisabled: true
41 nodeInterconnectDHCP: false
42 nodeInterconnectCIDR: 192.168.16.0/24
43 podSubnetCIDR: 10.1.0.0/16
44 podSubnetOneNodePrefixLen: 24
45 vppHostSubnetCIDR: 172.30.0.0/16
46 vppHostSubnetOneNodePrefixLen: 24
47 vxlanCIDR: 192.168.30.0/24
50 delayRetry: 1000000000
52 enableExpBackoffRetry: true
53 delayLocalResync: 5000000000
54 startupResyncDeadline: 30000000000
55 enablePeriodicHealing: false
56 periodicHealingInterval: 30000000000
57 delayAfterErrorHealing: 5000000000
58 remoteDBProbingInterval: 3000000000
59 recordEventHistory: true
60 eventHistoryAgeLimit: 1440
61 permanentlyRecordedInitPeriod: 60
63 cleanupIdleNATSessions: true
64 tcpNATSessionTimeout: 180
65 otherNATSessionTimeout: 5
66 serviceLocalEndpointWeight: 1
67 disableNATVirtualReassembly: false
75 namespace: kube-system
78 health-check-probe-interval: 3000000000
79 health-check-reply-timeout: 500000000
80 health-check-threshold: 3
81 reply-timeout: 3000000000
85 - name: statscollector
89 - name: linux.arp-conf
95 endpoint: /var/run/contiv/cni.sock
96 force-socket-removal: true
99 endpoint: "0.0.0.0:9999"
101 db-path: /var/bolt/bolt.db
105 polling-interval: 30000000000
107 linux-ifplugin.conf: |
108 dump-go-routines-count: 5
109 linux-l3plugin.conf: |
110 dump-go-routines-count: 5
112 record-transaction-history: true
113 transaction-history-age-limit: 1440
114 permanently-recorded-init-period: 60
122 namespace: kube-system
124 # The CNI network configuration to install on each node. The special
125 # values in this config will be automatically populated.
126 10-contiv-vpp.conflist: |-
128 "name": "k8s-pod-network",
129 "cniVersion": "0.3.1",
132 "type": "contiv-cni",
133 "grpcServer": "/var/run/contiv/cni.sock",
134 "logFile": "/var/run/contiv/cni.log"
141 "externalSetMarkChain": "KUBE-MARK-MASQ"
147 ###########################################################
149 # !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
151 ###########################################################
154 ###########################################################
155 # Components and other resources
156 ###########################################################
158 # This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
159 # In order to dump the content of ETCD, you can use the kubectl exec command similar to this:
160 # kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
161 apiVersion: apps/v1beta2
164 name: contiv-etcd-amd64
165 namespace: kube-system
179 # Marks this pod as a critical add-on.
180 scheduler.alpha.kubernetes.io/critical-pod: ''
183 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
187 # This likely isn't needed due to the above wildcard, but keep it in for now.
188 - key: CriticalAddonsOnly
190 # Only run this pod on the master.
192 node-role.kubernetes.io/master: ""
193 beta.kubernetes.io/arch: amd64
198 image: quay.io/coreos/etcd:v3.3.11
199 imagePullPolicy: IfNotPresent
201 - name: CONTIV_ETCD_IP
204 fieldPath: status.podIP
208 fieldPath: status.hostIP
215 - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
216 --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
217 --listen-peer-urls=http://0.0.0.0:12380
220 mountPath: /var/etcd/
227 echo "$HOST_IP" | grep -q ':'
232 etcdctl get --endpoints=$HOST_IP:32379 /
234 initialDelaySeconds: 20
244 # This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
245 # In order to dump the content of ETCD, you can use the kubectl exec command similar to this:
246 # kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
247 apiVersion: apps/v1beta2
250 name: contiv-etcd-arm64
251 namespace: kube-system
265 # Marks this pod as a critical add-on.
266 scheduler.alpha.kubernetes.io/critical-pod: ''
269 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
273 # This likely isn't needed due to the above wildcard, but keep it in for now.
274 - key: CriticalAddonsOnly
276 # Only run this pod on the master.
278 node-role.kubernetes.io/master: ""
279 beta.kubernetes.io/arch: arm64
284 image: quay.io/coreos/etcd:v3.3.11-arm64
285 imagePullPolicy: IfNotPresent
287 - name: CONTIV_ETCD_IP
290 fieldPath: status.podIP
294 fieldPath: status.hostIP
297 - name: ETCD_UNSUPPORTED_ARCH
303 - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
304 --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
305 --listen-peer-urls=http://0.0.0.0:12380
308 mountPath: /var/etcd/
315 echo "$HOST_IP" | grep -q ':'
320 etcdctl get --endpoints=$HOST_IP:32379 /
322 initialDelaySeconds: 20
336 namespace: kube-system
339 # Match contiv-etcd DaemonSet.
350 name: contiv-ksr-http-cfg
351 namespace: kube-system
354 endpoint: "0.0.0.0:9191"
357 # This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
361 name: contiv-etcd-cfg
362 namespace: kube-system
365 insecure-transport: true
366 dial-timeout: 10000000000
367 allow-delayed-start: true
369 - "__HOST_IP__:32379"
373 # This config map contains ETCD configuration for connecting to the contiv-etcd defined above with auto compact.
377 name: contiv-etcd-withcompact-cfg
378 namespace: kube-system
381 insecure-transport: true
382 dial-timeout: 10000000000
383 auto-compact: 600000000000
384 allow-delayed-start: true
385 reconnect-interval: 2000000000
387 - "__HOST_IP__:32379"
391 # This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
392 # It consists of the following containers:
393 # - contiv-vswitch container: contains VPP and its management agent
394 # - contiv-cni container: installs CNI on the host
395 apiVersion: extensions/v1beta1
398 name: contiv-vswitch-amd64
399 namespace: kube-system
401 k8s-app: contiv-vswitch
405 k8s-app: contiv-vswitch
411 k8s-app: contiv-vswitch
413 # Marks this pod as a critical add-on.
414 scheduler.alpha.kubernetes.io/critical-pod: ''
417 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
421 # This likely isn't needed due to the above wildcard, but keep it in for now.
422 - key: CriticalAddonsOnly
425 beta.kubernetes.io/arch: amd64
429 # Init containers are executed before regular containers, must finish successfully before regular ones
432 # This container installs the Contiv CNI binaries and CNI network config file on each node.
434 image: iecedge/cni:v3.2.1
435 imagePullPolicy: IfNotPresent
440 - mountPath: /opt/cni/bin
442 - mountPath: /etc/cni/net.d
444 - mountPath: /cni/cfg
446 - mountPath: /var/run/contiv
449 # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
451 image: iecedge/vswitch:v3.2.1
452 imagePullPolicy: IfNotPresent
457 fieldPath: status.hostIP
465 rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
466 if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
467 cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
469 if [ ! -d /var/run/contiv ]; then
470 mkdir /var/run/contiv
472 chmod 700 /var/run/contiv
473 rm -f /var/run/contiv/cni.sock
474 if ip link show vpp1 >/dev/null 2>&1; then
477 cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
478 sysctl -w debug.exception-trace=1
479 sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
481 echo 2 > /proc/sys/fs/suid_dumpable
482 # replace localhost IP by node IP since node port doesn't work
483 # on localhost IP in a certain scenario
484 cp /etc/etcd/etcd.conf /tmp/etcd.conf
486 echo "$HOST_IP" | grep -q ':'
491 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
496 - name: usr-local-bin
497 mountPath: /host/usr/local/bin
499 mountPath: /host/etc/vpp
505 mountPath: /var/run/contiv
511 mountPath: /var/contiv/dumps
514 # Runs contiv-vswitch container on each Kubernetes node.
515 # It contains the vSwitch VPP and its management agent.
516 - name: contiv-vswitch
517 image: iecedge/vswitch:v3.2.1
518 imagePullPolicy: IfNotPresent
522 # readiness + liveness probe
523 - containerPort: 9999
531 initialDelaySeconds: 15
539 initialDelaySeconds: 60
541 - name: MICROSERVICE_LABEL
544 fieldPath: spec.nodeName
548 fieldPath: status.hostIP
549 - name: GOVPPMUX_NOSOCK
551 - name: CONTIV_CONFIG
552 value: "/etc/contiv/contiv.conf"
553 - name: CONTROLLER_CONFIG
554 value: "/etc/contiv/controller.conf"
555 - name: SERVICE_CONFIG
556 value: "/etc/contiv/service.conf"
558 value: "/tmp/etcd.conf"
560 value: "/etc/vpp-agent/bolt.conf"
561 # Uncomment to log graph traversal (very verbose):
562 # - name: KVSCHED_LOG_GRAPH_WALK
564 # Uncomment to verify effect of every transaction:
565 # - name: KVSCHED_VERIFY_MODE
567 - name: TELEMETRY_CONFIG
568 value: "/etc/vpp-agent/telemetry.conf"
570 value: "/etc/vpp-agent/govpp.conf"
572 value: "/etc/vpp-agent/logs.conf"
574 value: "/etc/vpp-agent/http.conf"
576 value: "/etc/vpp-agent/grpc.conf"
577 - name: LINUX_IFPLUGIN_CONFIG
578 value: "/etc/vpp-agent/linux-ifplugin.conf"
579 - name: LINUX_L3PLUGIN_CONFIG
580 value: "/etc/vpp-agent/linux-l3plugin.conf"
581 - name: KVSCHEDULER_CONFIG
582 value: "/etc/vpp-agent/kvscheduler.conf"
595 mountPath: /sys/bus/pci
599 mountPath: /var/run/contiv
600 - name: contiv-agent-cfg
601 mountPath: /etc/contiv
602 - name: vpp-agent-cfg
603 mountPath: /etc/vpp-agent
607 mountPath: /var/contiv/dumps
608 - name: docker-socket
609 mountPath: /var/run/docker.sock
615 # Used to connect to contiv-etcd.
618 name: contiv-etcd-cfg
619 # Used to install CNI.
626 # VPP startup config folder.
631 - name: usr-local-bin
634 # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication
635 # with VPP (/dev/shm)
642 # /sys/bus/pci is required for binding PCI devices to specific drivers
646 # For CLI unix socket.
650 # For CNI / STN unix domain socket
653 path: /var/run/contiv
654 # Used to configure contiv agent.
655 - name: contiv-agent-cfg
657 name: contiv-agent-cfg
658 # Used to configure vpp agent.
659 - name: vpp-agent-cfg
662 # Used for vswitch core dumps
665 path: /var/contiv/dumps
666 # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
670 # persisted bolt data
674 - name: docker-socket
676 path: /var/run/docker.sock
678 - name: contiv-cni-cfg
683 # This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
684 # It consists of the following containers:
685 # - contiv-vswitch container: contains VPP and its management agent
686 # - contiv-cni container: installs CNI on the host
687 apiVersion: extensions/v1beta1
690 name: contiv-vswitch-arm64
691 namespace: kube-system
693 k8s-app: contiv-vswitch
697 k8s-app: contiv-vswitch
703 k8s-app: contiv-vswitch
705 # Marks this pod as a critical add-on.
706 scheduler.alpha.kubernetes.io/critical-pod: ''
709 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
713 # This likely isn't needed due to the above wildcard, but keep it in for now.
714 - key: CriticalAddonsOnly
717 beta.kubernetes.io/arch: arm64
721 # Init containers are executed before regular containers, must finish successfully before regular ones
724 # This container installs the Contiv CNI binaries and CNI network config file on each node.
726 image: iecedge/cni-arm64:v3.2.1
727 imagePullPolicy: IfNotPresent
732 - mountPath: /opt/cni/bin
734 - mountPath: /etc/cni/net.d
736 - mountPath: /cni/cfg
738 - mountPath: /var/run/contiv
741 # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
743 image: iecedge/vswitch-arm64:v3.2.1
744 imagePullPolicy: IfNotPresent
749 fieldPath: status.hostIP
757 rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
758 if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
759 cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
761 if [ ! -d /var/run/contiv ]; then
762 mkdir /var/run/contiv
764 chmod 700 /var/run/contiv
765 rm -f /var/run/contiv/cni.sock
766 if ip link show vpp1 >/dev/null 2>&1; then
769 cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
770 sysctl -w debug.exception-trace=1
771 sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
773 echo 2 > /proc/sys/fs/suid_dumpable
774 # replace localhost IP by node IP since node port doesn't work
775 # on localhost IP in a certain scenario
776 cp /etc/etcd/etcd.conf /tmp/etcd.conf
778 echo "$HOST_IP" | grep -q ':'
783 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
788 - name: usr-local-bin
789 mountPath: /host/usr/local/bin
791 mountPath: /host/etc/vpp
797 mountPath: /var/run/contiv
803 mountPath: /var/contiv/dumps
806 # Runs contiv-vswitch container on each Kubernetes node.
807 # It contains the vSwitch VPP and its management agent.
808 - name: contiv-vswitch
809 image: iecedge/vswitch-arm64:v3.2.1
810 imagePullPolicy: IfNotPresent
814 # readiness + liveness probe
815 - containerPort: 9999
823 initialDelaySeconds: 15
831 initialDelaySeconds: 60
833 - name: MICROSERVICE_LABEL
836 fieldPath: spec.nodeName
840 fieldPath: status.hostIP
841 - name: GOVPPMUX_NOSOCK
843 - name: CONTIV_CONFIG
844 value: "/etc/contiv/contiv.conf"
845 - name: CONTROLLER_CONFIG
846 value: "/etc/contiv/controller.conf"
847 - name: SERVICE_CONFIG
848 value: "/etc/contiv/service.conf"
850 value: "/tmp/etcd.conf"
852 value: "/etc/vpp-agent/bolt.conf"
853 # Uncomment to log graph traversal (very verbose):
854 # - name: KVSCHED_LOG_GRAPH_WALK
856 # Uncomment to verify effect of every transaction:
857 # - name: KVSCHED_VERIFY_MODE
859 - name: TELEMETRY_CONFIG
860 value: "/etc/vpp-agent/telemetry.conf"
862 value: "/etc/vpp-agent/govpp.conf"
864 value: "/etc/vpp-agent/logs.conf"
866 value: "/etc/vpp-agent/http.conf"
868 value: "/etc/vpp-agent/grpc.conf"
869 - name: LINUX_IFPLUGIN_CONFIG
870 value: "/etc/vpp-agent/linux-ifplugin.conf"
871 - name: LINUX_L3PLUGIN_CONFIG
872 value: "/etc/vpp-agent/linux-l3plugin.conf"
873 - name: KVSCHEDULER_CONFIG
874 value: "/etc/vpp-agent/kvscheduler.conf"
887 mountPath: /sys/bus/pci
891 mountPath: /var/run/contiv
892 - name: contiv-agent-cfg
893 mountPath: /etc/contiv
894 - name: vpp-agent-cfg
895 mountPath: /etc/vpp-agent
899 mountPath: /var/contiv/dumps
900 - name: docker-socket
901 mountPath: /var/run/docker.sock
907 # Used to connect to contiv-etcd.
910 name: contiv-etcd-cfg
911 # Used to install CNI.
918 # VPP startup config folder.
923 - name: usr-local-bin
926 # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with
934 # /sys/bus/pci is required for binding PCI devices to specific drivers
938 # For CLI unix socket.
942 # For CNI / STN unix domain socket
945 path: /var/run/contiv
946 # Used to configure contiv agent.
947 - name: contiv-agent-cfg
949 name: contiv-agent-cfg
950 # Used to configure vpp agent.
951 - name: vpp-agent-cfg
954 # Used for vswitch core dumps
957 path: /var/contiv/dumps
958 # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
962 # persisted bolt data
966 - name: docker-socket
968 path: /var/run/docker.sock
970 - name: contiv-cni-cfg
975 # This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
976 apiVersion: extensions/v1beta1
979 name: contiv-ksr-amd64
980 namespace: kube-system
991 # Marks this pod as a critical add-on.
992 scheduler.alpha.kubernetes.io/critical-pod: ''
995 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
999 # This likely isn't needed due to the above wildcard, but keep it in for now.
1000 - key: CriticalAddonsOnly
1002 # Only run this pod on the master.
1004 node-role.kubernetes.io/master: ""
1005 beta.kubernetes.io/arch: amd64
1007 # This grants the required permissions to contiv-ksr.
1008 serviceAccountName: contiv-ksr
1011 # This init container waits until etcd is started
1012 - name: wait-foretcd
1019 fieldPath: status.hostIP
1020 image: busybox:1.29.3
1021 imagePullPolicy: IfNotPresent
1027 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
1028 echo "$HOST_IP" | grep -q ':'
1029 if [ "$?" -eq "0" ];
1031 HOST_IP="[$HOST_IP]"
1033 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
1034 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
1039 mountPath: /etc/etcd
1044 image: iecedge/ksr:v3.2.1
1045 imagePullPolicy: IfNotPresent
1048 value: "/tmp/cfg/etcd.conf"
1050 value: "/etc/http/http.conf"
1055 mountPath: /etc/http
1063 initialDelaySeconds: 10
1071 initialDelaySeconds: 30
1076 # Used to connect to contiv-etcd.
1079 name: contiv-etcd-withcompact-cfg
1084 name: contiv-ksr-http-cfg
1087 # This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
1088 apiVersion: extensions/v1beta1
1091 name: contiv-ksr-arm64
1092 namespace: kube-system
1103 # Marks this pod as a critical add-on.
1104 scheduler.alpha.kubernetes.io/critical-pod: ''
1107 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
1111 # This likely isn't needed due to the above wildcard, but keep it in for now.
1112 - key: CriticalAddonsOnly
1114 # Only run this pod on the master.
1116 node-role.kubernetes.io/master: ""
1117 beta.kubernetes.io/arch: arm64
1119 # This grants the required permissions to contiv-ksr.
1120 serviceAccountName: contiv-ksr
1123 # This init container waits until etcd is started
1124 - name: wait-foretcd
1131 fieldPath: status.hostIP
1132 image: arm64v8/busybox:1.29.3
1133 imagePullPolicy: IfNotPresent
1139 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
1140 echo "$HOST_IP" | grep -q ':'
1141 if [ "$?" -eq "0" ];
1143 HOST_IP="[$HOST_IP]"
1145 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
1146 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
1151 mountPath: /etc/etcd
1156 image: iecedge/ksr-arm64:v3.2.1
1157 imagePullPolicy: IfNotPresent
1160 value: "/tmp/cfg/etcd.conf"
1162 value: "/etc/http/http.conf"
1167 mountPath: /etc/http
1175 initialDelaySeconds: 10
1183 initialDelaySeconds: 30
1188 # Used to connect to contiv-etcd.
1191 name: contiv-etcd-withcompact-cfg
1196 name: contiv-ksr-http-cfg
1200 # This cluster role defines a set of permissions required for contiv-ksr.
1201 apiVersion: rbac.authorization.k8s.io/v1beta1
1205 namespace: kube-system
1223 # This defines a service account for contiv-ksr.
1225 kind: ServiceAccount
1228 namespace: kube-system
1232 # This binds the contiv-ksr cluster role with contiv-ksr service account.
1233 apiVersion: rbac.authorization.k8s.io/v1beta1
1234 kind: ClusterRoleBinding
1238 apiGroup: rbac.authorization.k8s.io
1242 - kind: ServiceAccount
1244 namespace: kube-system
1248 # This installs the contiv-crd on the master node in a Kubernetes cluster.
1249 apiVersion: extensions/v1beta1
1252 name: contiv-crd-amd64
1253 namespace: kube-system
1264 # Marks this pod as a critical add-on.
1265 scheduler.alpha.kubernetes.io/critical-pod: ''
1268 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
1272 # This likely isn't needed due to the above wildcard, but keep it in for now.
1273 - key: CriticalAddonsOnly
1275 # Only run this pod on the master.
1277 node-role.kubernetes.io/master: ""
1278 beta.kubernetes.io/arch: amd64
1280 # This grants the required permissions to contiv-crd.
1281 serviceAccountName: contiv-crd
1284 # This init container waits until etcd is started
1285 - name: wait-foretcd
1292 fieldPath: status.hostIP
1293 image: busybox:1.29.3
1294 imagePullPolicy: IfNotPresent
1300 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
1301 echo "$HOST_IP" | grep -q ':'
1302 if [ "$?" -eq "0" ];
1304 HOST_IP="[$HOST_IP]"
1306 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
1307 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
1312 mountPath: /etc/etcd
1314 # This init container copies contiv-netctl tool to the host.
1316 image: iecedge/crd:v3.2.1
1317 imagePullPolicy: IfNotPresent
1324 kubectl get pods -n kube-system | \
1327 xargs -I{} kubectl exec -n kube-system {} \
1328 /contiv-netctl "$@"' \
1329 > /host/usr/local/bin/contiv-netctl || true
1330 chmod +x /host/usr/local/bin/contiv-netctl || true
1332 - name: usr-local-bin
1333 mountPath: /host/usr/local/bin
1337 image: iecedge/crd:v3.2.1
1338 imagePullPolicy: IfNotPresent
1341 value: "/tmp/cfg/etcd.conf"
1343 value: "/etc/http/http.conf"
1344 - name: HTTP_CLIENT_CONFIG
1345 value: "/etc/http/http.client.conf"
1346 - name: CONTIV_CRD_VALIDATE_INTERVAL
1348 - name: CONTIV_CRD_VALIDATE_STATE
1350 - name: DISABLE_NETCTL_REST
1356 mountPath: /etc/http
1364 initialDelaySeconds: 10
1372 initialDelaySeconds: 30
1378 # Used to connect to contiv-etcd.
1381 name: contiv-etcd-cfg
1382 - name: usr-local-bin
1384 path: /usr/local/bin
1387 name: contiv-crd-http-cfg
1391 # This installs the contiv-crd on the master node in a Kubernetes cluster.
1392 apiVersion: extensions/v1beta1
1395 name: contiv-crd-arm64
1396 namespace: kube-system
1407 # Marks this pod as a critical add-on.
1408 scheduler.alpha.kubernetes.io/critical-pod: ''
1411 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
1415 # This likely isn't needed due to the above wildcard, but keep it in for now.
1416 - key: CriticalAddonsOnly
1418 # Only run this pod on the master.
1420 node-role.kubernetes.io/master: ""
1421 beta.kubernetes.io/arch: arm64
1423 # This grants the required permissions to contiv-crd.
1424 serviceAccountName: contiv-crd
1427 # This init container waits until etcd is started
1428 - name: wait-foretcd
1435 fieldPath: status.hostIP
1436 image: arm64v8/busybox:1.29.3
1437 imagePullPolicy: IfNotPresent
1443 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
1444 echo "$HOST_IP" | grep -q ':'
1445 if [ "$?" -eq "0" ];
1447 HOST_IP="[$HOST_IP]"
1449 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
1450 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
1455 mountPath: /etc/etcd
1457 # This init container copies contiv-netctl tool to the host.
1459 image: iecedge/crd-arm64:v3.2.1
1460 imagePullPolicy: IfNotPresent
1467 kubectl get pods -n kube-system | \
1470 xargs -I{} kubectl exec -n kube-system {} \
1471 /contiv-netctl "$@"' \
1472 > /host/usr/local/bin/contiv-netctl || true
1473 chmod +x /host/usr/local/bin/contiv-netctl || true
1475 - name: usr-local-bin
1476 mountPath: /host/usr/local/bin
1480 image: iecedge/crd-arm64:v3.2.1
1481 imagePullPolicy: IfNotPresent
1484 value: "/tmp/cfg/etcd.conf"
1486 value: "/etc/http/http.conf"
1487 - name: HTTP_CLIENT_CONFIG
1488 value: "/etc/http/http.client.conf"
1489 - name: CONTIV_CRD_VALIDATE_INTERVAL
1491 - name: CONTIV_CRD_VALIDATE_STATE
1493 - name: DISABLE_NETCTL_REST
1499 mountPath: /etc/http
1507 initialDelaySeconds: 10
1515 initialDelaySeconds: 30
1521 # Used to connect to contiv-etcd.
1524 name: contiv-etcd-cfg
1525 - name: usr-local-bin
1527 path: /usr/local/bin
1530 name: contiv-crd-http-cfg
1535 # This cluster role defines a set of permissions required for contiv-crd.
1536 apiVersion: rbac.authorization.k8s.io/v1beta1
1540 namespace: kube-system
1543 - apiextensions.k8s.io
1544 - nodeconfig.contiv.vpp
1545 - telemetry.contiv.vpp
1547 - customresourcedefinitions
1555 # This defines a service account for contiv-crd.
1557 kind: ServiceAccount
1560 namespace: kube-system
1564 # This binds the contiv-crd cluster role with contiv-crd service account.
1565 apiVersion: rbac.authorization.k8s.io/v1beta1
1566 kind: ClusterRoleBinding
1570 apiGroup: rbac.authorization.k8s.io
1574 - kind: ServiceAccount
1576 namespace: kube-system
1583 name: contiv-crd-http-cfg
1584 namespace: kube-system
1587 endpoint: "0.0.0.0:9090"