2 # Source: contiv-vpp/templates/vpp.yaml
3 # Contiv-VPP deployment YAML file. This deploys Contiv-VPP networking on a Kubernetes cluster.
4 # The deployment consists of the following components (a verification example follows the list):
5 # - contiv-etcd - deployed on k8s master
6 # - contiv-vswitch - deployed on each k8s node
7 # - contiv-ksr - deployed on k8s master
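#
# A quick way to verify the deployment after applying this file is to list the
# Contiv pods in the kube-system namespace, for example:
#
#   kubectl get pods -n kube-system -o wide | grep contiv
#
# contiv-etcd and contiv-ksr (and contiv-crd, defined further below) run on the
# master; one contiv-vswitch pod runs on every node.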
9 ###########################################################
11 ###########################################################
13 # This config map contains the contiv-agent configuration. The most important part is the ipamConfig,
14 # which may be updated if the default IPAM settings do not match your needs.
15 # nodeConfig may be used if your nodes have more than one VPP interface. In that case, one
16 # of them needs to be marked as the main inter-node interface, and the rest of them can be
17 # configured with any IP addresses (the IPs must not conflict with the main IPAM config).
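#
# As an illustration only (the values here are assumptions, not recommendations):
# with the defaults below, podSubnetCIDR 10.1.0.0/16 is split into per-node /24
# subnets (podSubnetOneNodePrefixLen: 24), i.e. up to 256 nodes with roughly 254
# pod addresses each. A cluster needing more pods per node could instead use:
#
#   ipamConfig:
#     podSubnetCIDR: 10.0.0.0/14
#     podSubnetOneNodePrefixLen: 22
#
# Most delay/interval values further below (delayRetry, delayLocalResync, etc.)
# appear to be Go durations given in nanoseconds, so 5000000000 means 5 seconds.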
21 name: contiv-agent-cfg
22 namespace: kube-system
25 nodeToNodeTransport: vxlan
26 useSRv6ForServices: false
27 useTAPInterfaces: true
28 tapInterfaceVersion: 2
32 tcpChecksumOffloadDisabled: true
34 natExternalTraffic: true
37 ipNeighborScanInterval: 1
38 ipNeighborStaleThreshold: 4
39 enablePacketTrace: false
40 routeServiceCIDRToVPP: false
41 crdNodeConfigurationDisabled: true
43 nodeInterconnectDHCP: false
44 nodeInterconnectCIDR: 192.168.16.0/24
45 podSubnetCIDR: 10.1.0.0/16
46 podSubnetOneNodePrefixLen: 24
47 vppHostSubnetCIDR: 172.30.0.0/16
48 vppHostSubnetOneNodePrefixLen: 24
49 vxlanCIDR: 192.168.30.0/24
51 servicePolicyBSIDSubnetCIDR: 8fff::/16
52 servicePodLocalSIDSubnetCIDR: 9300::/16
53 serviceHostLocalSIDSubnetCIDR: 9300::/16
54 serviceNodeLocalSIDSubnetCIDR: 9000::/16
55 nodeToNodePodLocalSIDSubnetCIDR: 9501::/16
56 nodeToNodeHostLocalSIDSubnetCIDR: 9500::/16
57 nodeToNodePodPolicySIDSubnetCIDR: 8501::/16
58 nodeToNodeHostPolicySIDSubnetCIDR: 8500::/16
60 - nodeName: net-arm-mcbin-iec
62 interfaceName: mv-ppio-0/0
63 - nodeName: net-arm-mcbin-iec-1
65 interfaceName: mv-ppio-0/0
68 delayRetry: 1000000000
70 enableExpBackoffRetry: true
71 delayLocalResync: 5000000000
72 startupResyncDeadline: 30000000000
73 enablePeriodicHealing: false
74 periodicHealingInterval: 30000000000
75 delayAfterErrorHealing: 5000000000
76 remoteDBProbingInterval: 3000000000
77 recordEventHistory: true
78 eventHistoryAgeLimit: 60
79 permanentlyRecordedInitPeriod: 10
81 cleanupIdleNATSessions: true
82 tcpNATSessionTimeout: 180
83 otherNATSessionTimeout: 5
84 serviceLocalEndpointWeight: 1
85 disableNATVirtualReassembly: false
93 namespace: kube-system
96 health-check-probe-interval: 3000000000
97 health-check-reply-timeout: 500000000
98 health-check-threshold: 3
99 reply-timeout: 3000000000
103 - name: statscollector
107 - name: linux.arp-conf
113 endpoint: /var/run/contiv/cni.sock
114 force-socket-removal: true
117 endpoint: "0.0.0.0:9999"
119 db-path: /var/bolt/bolt.db
123 polling-interval: 30000000000
125 linux-ifplugin.conf: |
126 dump-go-routines-count: 5
127 linux-l3plugin.conf: |
128 dump-go-routines-count: 5
130 record-transaction-history: true
131 transaction-history-age-limit: 60
132 permanently-recorded-init-period: 10
140 namespace: kube-system
142 # The CNI network configuration to install on each node. The special
143 # values in this config will be automatically populated.
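# After the contiv-cni init container (defined further below) has installed it,
# the rendered config can be inspected directly on a node, for example:
#
#   cat /etc/cni/net.d/10-contiv-vpp.conflist
#
# (path taken from the /etc/cni/net.d mount used by that init container).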
144 10-contiv-vpp.conflist: |-
146 "name": "k8s-pod-network",
147 "cniVersion": "0.3.1",
150 "type": "contiv-cni",
151 "grpcServer": "/var/run/contiv/cni.sock",
152 "logFile": "/var/run/contiv/cni.log"
159 "externalSetMarkChain": "KUBE-MARK-MASQ"
165 ###########################################################
167 # !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
169 ###########################################################
172 ###########################################################
173 # Components and other resources
174 ###########################################################
176 # This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
177 # In order to dump the content of ETCD, you can use a kubectl exec command similar to this:
178 # kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
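# Alternatively, the same data can be read through the host-exposed etcd port
# 32379 that the probes and etcd.conf endpoints below point at, for example
# (<node-ip> is a placeholder; ETCDCTL_API=3 may be required depending on the
# etcdctl build):
# etcdctl get --endpoints=<node-ip>:32379 --prefix="true" ""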
179 apiVersion: apps/v1beta2
183 namespace: kube-system
187 serviceName: contiv-etcd
198 # Marks this pod as a critical add-on.
199 scheduler.alpha.kubernetes.io/critical-pod: ''
202 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
206 # This likely isn't needed due to the above wildcard, but keep it in for now.
207 - key: CriticalAddonsOnly
209 # Only run this pod on the master.
211 node-role.kubernetes.io/master: ""
216 image: quay.io/coreos/etcd:v3.3.11-arm64
217 imagePullPolicy: IfNotPresent
219 - name: CONTIV_ETCD_IP
222 fieldPath: status.podIP
226 fieldPath: status.hostIP
229 - name: ETCD_UNSUPPORTED_ARCH
235 - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
236 --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
237 --listen-peer-urls=http://0.0.0.0:12380
240 mountPath: /var/etcd/
247 echo "$HOST_IP" | grep -q ':'
252 etcdctl get --endpoints=$HOST_IP:32379 /
254 initialDelaySeconds: 20
269 namespace: kube-system
272 # Match contiv-etcd DaemonSet.
282 name: contiv-ksr-http-cfg
283 namespace: kube-system
286 endpoint: "0.0.0.0:9191"
289 # This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
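# The "__HOST_IP__" placeholder in the endpoint list is not used as-is: the
# contiv-vswitch, contiv-ksr and contiv-crd pods defined below each copy this
# file and substitute the node IP in an init step, e.g.:
#   sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf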
293 name: contiv-etcd-cfg
294 namespace: kube-system
297 dial-timeout: 10000000000
298 allow-delayed-start: true
299 insecure-transport: true
301 - "__HOST_IP__:32379"
305 # This config map contains ETCD configuration for connecting to the contiv-etcd defined above with auto compact.
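# The numeric settings below appear to be Go durations in nanoseconds (an
# assumption based on the other *.conf values in this file): dial-timeout
# 10000000000 = 10 s, auto-compact 600000000000 = 10 min, and
# reconnect-interval 2000000000 = 2 s.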
309 name: contiv-etcd-withcompact-cfg
310 namespace: kube-system
313 insecure-transport: true
314 dial-timeout: 10000000000
315 auto-compact: 600000000000
316 allow-delayed-start: true
317 reconnect-interval: 2000000000
319 - "__HOST_IP__:32379"
323 # This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
324 # It consists of the following containers:
325 # - contiv-vswitch container: contains VPP and its management agent
326 # - contiv-cni container: installs CNI on the host
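#
# Once applied, the DaemonSet can be checked with, for example:
#   kubectl get pods -n kube-system -l k8s-app=contiv-vswitch -o wide
# which should list one contiv-vswitch pod per node.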
327 apiVersion: extensions/v1beta1
331 namespace: kube-system
333 k8s-app: contiv-vswitch
337 k8s-app: contiv-vswitch
343 k8s-app: contiv-vswitch
345 # Marks this pod as a critical add-on.
346 scheduler.alpha.kubernetes.io/critical-pod: ''
349 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
353 # This likely isn't needed due to the above wildcard, but keep it in for now.
354 - key: CriticalAddonsOnly
359 # Init containers are executed before regular containers and must finish successfully before the regular ones are started.
362 # This container installs the Contiv CNI binaries and CNI network config file on each node.
364 image: iecedge/cni-arm64:v3.2.1-macbin
365 imagePullPolicy: IfNotPresent
370 - mountPath: /opt/cni/bin
372 - mountPath: /etc/cni/net.d
374 - mountPath: /cni/cfg
376 - mountPath: /var/run/contiv
379 # This init container extracts/copies the default VPP config to the host and initializes VPP core dumps.
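# Note that the script below copies contiv-vswitch.conf to the host (mounted at
# /host/etc/vpp in this container) only when no such file exists there yet, so a
# VPP startup config customized on the node is preserved across pod restarts.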
381 image: iecedge/vswitch-arm64:v3.2.1-macbin
382 imagePullPolicy: IfNotPresent
387 fieldPath: status.hostIP
395 rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
396 if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
397 cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
399 if [ ! -d /var/run/contiv ]; then
400 mkdir /var/run/contiv
402 chmod 700 /var/run/contiv
403 rm -f /var/run/contiv/cni.sock
404 if ip link show vpp1 >/dev/null 2>&1; then
407 cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
408 sysctl -w debug.exception-trace=1
409 sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
411 echo 2 > /proc/sys/fs/suid_dumpable
412 # Replace the localhost IP with the node IP, since NodePort does not work
413 # on the localhost IP in certain scenarios.
414 cp /etc/etcd/etcd.conf /tmp/etcd.conf
416 echo "$HOST_IP" | grep -q ':'
421 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
426 - name: usr-local-bin
427 mountPath: /host/usr/local/bin
429 mountPath: /host/etc/vpp
435 mountPath: /var/run/contiv
441 mountPath: /var/contiv/dumps
444 # Runs the contiv-vswitch container on each Kubernetes node.
445 # It contains the VPP vSwitch and its management agent.
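#
# For troubleshooting, the VPP instance in this container can be inspected with
# vppctl through kubectl exec, for example (the pod name suffix is generated):
#   kubectl exec -n kube-system <contiv-vswitch-pod> -c contiv-vswitch -- vppctl show interface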
446 - name: contiv-vswitch
447 image: iecedge/vswitch-arm64:v3.2.1-macbin
448 imagePullPolicy: IfNotPresent
452 # readiness + liveness probe
453 - containerPort: 9999
461 initialDelaySeconds: 15
469 initialDelaySeconds: 60
471 - name: MICROSERVICE_LABEL
474 fieldPath: spec.nodeName
478 fieldPath: status.hostIP
479 - name: GOVPPMUX_NOSOCK
481 - name: CONTIV_CONFIG
482 value: "/etc/contiv/contiv.conf"
483 - name: CONTROLLER_CONFIG
484 value: "/etc/contiv/controller.conf"
485 - name: SERVICE_CONFIG
486 value: "/etc/contiv/service.conf"
488 value: "/tmp/etcd.conf"
490 value: "/etc/vpp-agent/bolt.conf"
491 # Uncomment to log graph traversal (very verbose):
492 # - name: KVSCHED_LOG_GRAPH_WALK
494 # Uncomment to verify effect of every transaction:
495 # - name: KVSCHED_VERIFY_MODE
497 - name: TELEMETRY_CONFIG
498 value: "/etc/vpp-agent/telemetry.conf"
500 value: "/etc/vpp-agent/govpp.conf"
502 value: "/etc/vpp-agent/logs.conf"
504 value: "/etc/vpp-agent/http.conf"
506 value: "/etc/vpp-agent/grpc.conf"
507 - name: LINUX_IFPLUGIN_CONFIG
508 value: "/etc/vpp-agent/linux-ifplugin.conf"
509 - name: LINUX_L3PLUGIN_CONFIG
510 value: "/etc/vpp-agent/linux-l3plugin.conf"
511 - name: KVSCHEDULER_CONFIG
512 value: "/etc/vpp-agent/kvscheduler.conf"
513 - name: DISABLE_INTERFACE_STATS
527 mountPath: /sys/bus/pci
531 mountPath: /var/run/contiv
532 - name: contiv-agent-cfg
533 mountPath: /etc/contiv
534 - name: vpp-agent-cfg
535 mountPath: /etc/vpp-agent
539 mountPath: /var/contiv/dumps
540 - name: docker-socket
541 mountPath: /var/run/docker.sock
543 mountPath: /var/lib/kubelet
552 # Used to connect to contiv-etcd.
555 name: contiv-etcd-cfg
556 # Used to install CNI.
563 # VPP startup config folder.
568 - name: usr-local-bin
571 # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with VPP
579 # /sys/bus/pci is required for binding PCI devices to specific drivers
583 # For CLI unix socket.
587 # For CNI / STN unix domain socket
590 path: /var/run/contiv
591 # Used to configure contiv agent.
592 - name: contiv-agent-cfg
594 name: contiv-agent-cfg
595 # Used to configure vpp agent.
596 - name: vpp-agent-cfg
599 # Used for vswitch core dumps
602 path: /var/contiv/dumps
603 # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
607 # persisted bolt data
611 - name: docker-socket
613 path: /var/run/docker.sock
615 - name: contiv-cni-cfg
621 path: /var/lib/kubelet
625 # This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
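# KSR watches the Kubernetes API (pods, services, policies, etc.) and reflects
# that state into contiv-etcd, where the vswitch agents consume it; its output
# can therefore be checked with the etcd dump command shown in the contiv-etcd
# section above.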
626 apiVersion: extensions/v1beta1
630 namespace: kube-system
641 # Marks this pod as a critical add-on.
642 scheduler.alpha.kubernetes.io/critical-pod: ''
645 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
649 # This likely isn't needed due to the above wildcard, but keep it in for now.
650 - key: CriticalAddonsOnly
652 # Only run this pod on the master.
654 node-role.kubernetes.io/master: ""
656 # This grants the required permissions to contiv-ksr.
657 serviceAccountName: contiv-ksr
660 # This init container waits until etcd is started
668 fieldPath: status.hostIP
669 image: arm64v8/busybox:1.29.3
670 imagePullPolicy: IfNotPresent
676 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
677 echo "$HOST_IP" | grep -q ':'
682 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
683 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
693 image: iecedge/ksr-arm64:v3.2.1-macbin
694 imagePullPolicy: IfNotPresent
697 value: "/tmp/cfg/etcd.conf"
699 value: "/etc/http/http.conf"
712 initialDelaySeconds: 10
720 initialDelaySeconds: 30
725 # Used to connect to contiv-etcd.
728 name: contiv-etcd-withcompact-cfg
733 name: contiv-ksr-http-cfg
737 # This cluster role defines a set of permissions required for contiv-ksr.
738 apiVersion: rbac.authorization.k8s.io/v1beta1
742 namespace: kube-system
760 # This defines a service account for contiv-ksr.
765 namespace: kube-system
769 # This binds the contiv-ksr cluster role with contiv-ksr service account.
770 apiVersion: rbac.authorization.k8s.io/v1beta1
771 kind: ClusterRoleBinding
775 apiGroup: rbac.authorization.k8s.io
779 - kind: ServiceAccount
781 namespace: kube-system
785 # This installs the contiv-crd on the master node in a Kubernetes cluster.
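# contiv-crd serves the Contiv custom resources (the nodeconfig.contiv.vpp and
# telemetry.contiv.vpp API groups referenced in the ClusterRole below) and
# exposes an HTTP endpoint on port 9090 (see contiv-crd-http-cfg at the end of
# this file). Once the CRDs are registered, they can be listed with, for example:
#   kubectl get crds | grep contiv.vpp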
786 apiVersion: extensions/v1beta1
790 namespace: kube-system
801 # Marks this pod as a critical add-on.
802 scheduler.alpha.kubernetes.io/critical-pod: ''
805 # We need this to schedule on the master no matter what else is going on, so tolerate everything.
809 # This likely isn't needed due to the above wildcard, but keep it in for now.
810 - key: CriticalAddonsOnly
812 # Only run this pod on the master.
814 node-role.kubernetes.io/master: ""
816 # This grants the required permissions to contiv-crd.
817 serviceAccountName: contiv-crd
820 # This init container waits until etcd is started
828 fieldPath: status.hostIP
829 image: arm64v8/busybox:1.29.3
830 imagePullPolicy: IfNotPresent
836 cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
837 echo "$HOST_IP" | grep -q ':'
842 sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
843 until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
850 # This init container copies the contiv-netctl tool to the host.
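# The installed /usr/local/bin/contiv-netctl wrapper simply wraps
# 'kubectl exec ... /contiv-netctl "$@"' (see the script below), so it can be
# run directly on the master node, e.g. (subcommand taken from the upstream
# Contiv-VPP docs, not from this file):
#   contiv-netctl nodes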
852 image: iecedge/crd-arm64:v3.2.1-macbin
853 imagePullPolicy: IfNotPresent
860 kubectl get pods -n kube-system | \
863 xargs -I{} kubectl exec -n kube-system {} \
864 /contiv-netctl "$@"' \
865 > /host/usr/local/bin/contiv-netctl || true
866 chmod +x /host/usr/local/bin/contiv-netctl || true
868 - name: usr-local-bin
869 mountPath: /host/usr/local/bin
873 image: iecedge/crd-arm64:v3.2.1-macbin
874 imagePullPolicy: IfNotPresent
877 value: "/tmp/cfg/etcd.conf"
879 value: "/etc/http/http.conf"
880 - name: HTTP_CLIENT_CONFIG
881 value: "/etc/http/http.client.conf"
882 - name: CONTIV_CRD_VALIDATE_INTERVAL
884 - name: CONTIV_CRD_VALIDATE_STATE
886 - name: DISABLE_NETCTL_REST
900 initialDelaySeconds: 10
908 initialDelaySeconds: 30
914 # Used to connect to contiv-etcd.
917 name: contiv-etcd-cfg
918 - name: usr-local-bin
923 name: contiv-crd-http-cfg
928 # This cluster role defines a set of permissions required for contiv-crd.
929 apiVersion: rbac.authorization.k8s.io/v1beta1
933 namespace: kube-system
936 - apiextensions.k8s.io
937 - nodeconfig.contiv.vpp
938 - telemetry.contiv.vpp
941 - customresourcedefinitions
945 - servicefunctionchains
951 # This defines a service account for contiv-crd.
956 namespace: kube-system
960 # This binds the contiv-crd cluster role with contiv-crd service account.
961 apiVersion: rbac.authorization.k8s.io/v1beta1
962 kind: ClusterRoleBinding
966 apiGroup: rbac.authorization.k8s.io
970 - kind: ServiceAccount
972 namespace: kube-system
979 name: contiv-crd-http-cfg
980 namespace: kube-system
983 endpoint: "0.0.0.0:9090"