Integrate contiv/vpp CNI into IEC
author Jianlin Lv <Jianlin.Lv@arm.com>
Fri, 30 Aug 2019 02:34:14 +0000 (10:34 +0800)
committer Jianlin Lv <Jianlin.Lv@arm.com>
Mon, 2 Sep 2019 09:01:24 +0000 (17:01 +0800)
Make master and worker nodes support contivpp deployment

Signed-off-by: Jianlin Lv <Jianlin.Lv@arm.com>
Change-Id: I5741b26779136ce626fdeec22d4b9f88e4769681

src/foundation/scripts/cni/contivpp/contiv-update-config.sh [new file with mode: 0755]
src/foundation/scripts/cni/contivpp/contiv-vpp.yaml [new file with mode: 0644]
src/foundation/scripts/cni/contivpp/contiv-vswitch.conf [new file with mode: 0644]
src/foundation/scripts/config
src/foundation/scripts/setup-cni.sh
src/foundation/scripts/startup.sh
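
For reference, a minimal configuration sketch for enabling this CNI via src/foundation/scripts/config
(the worker IP and interface name are the illustrative values from the config comments, not
site-specific defaults):

    CNI_TYPE=contivpp
    declare -A DEV_NAME
    DEV_NAME=(
      [10.169.40.106]="enp137s0f0"
    )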

diff --git a/src/foundation/scripts/cni/contivpp/contiv-update-config.sh b/src/foundation/scripts/cni/contivpp/contiv-update-config.sh
new file mode 100755 (executable)
index 0000000..3fb6d11
--- /dev/null
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -o xtrace
+set -e
+
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+echo "SCRIPTS_DIR is :$SCRIPTS_DIR"
+
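+# Usage (illustrative device name): sudo ./contiv-update-config.sh enp137s0f0
+# The argument is the NIC that VPP/DPDK should take over on this node.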
+DEV_NAME=${1:-}
+
+if [ -z "${DEV_NAME}" ]
+then
+  echo "Please specify a device name!"
+  exit 1
+fi
+
+# Extract PCI address
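+# Illustrative lshw -businfo line (assumed values): "pci@0000:89:00.0  enp137s0f0  network ...";
+# the awk below extracts the PCI address, e.g. "0000:89:00.0".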
+PCI_ADDRESS=$(lshw -class network -businfo | awk -F '@| ' '/pci.*'$DEV_NAME'/ {printf $2}')
+if [ -z "${PCI_ADDRESS}" ]
+then
+  echo "PCI_ADDRESS is NULL, maybe $DEV_NAME is wrong!"
+  exit 1
+fi
+
+# Update config file
+mkdir -p /etc/vpp
+cp -f ${SCRIPTS_DIR}/contiv-vswitch.conf  /etc/vpp/contiv-vswitch.conf
+cat <<EOF >> /etc/vpp/contiv-vswitch.conf
+dpdk {
+    dev $PCI_ADDRESS
+}
+EOF
+
+# make sure that the selected interface is shut down, otherwise VPP would not grab it
+ifconfig $DEV_NAME down
diff --git a/src/foundation/scripts/cni/contivpp/contiv-vpp.yaml b/src/foundation/scripts/cni/contivpp/contiv-vpp.yaml
new file mode 100644 (file)
index 0000000..a3910f3
--- /dev/null
@@ -0,0 +1,1590 @@
+---
+# Source: contiv-vpp/templates/vpp.yaml
+# Contiv-VPP deployment YAML file. This deploys Contiv VPP networking on a Kubernetes cluster.
+# The deployment consists of the following components:
+#   - contiv-etcd - deployed on k8s master
+#   - contiv-vswitch - deployed on each k8s node
+#   - contiv-ksr - deployed on k8s master
+
+###########################################################
+#  Configuration
+###########################################################
+
+# This config map contains contiv-agent configuration. The most important part is the ipamConfig,
+# which may be updated in case the default IPAM settings do not match your needs.
+# nodeConfig may be used in case your nodes have more than 1 VPP interface. In that case, one
+# of them needs to be marked as the main inter-node interface, and the rest of them can be
+# configured with any IP addresses (the IPs cannot conflict with the main IPAM config).
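+# Illustrative nodeConfig sketch (field names as per the upstream Contiv-VPP documentation,
+# not verified against this image version; values are hypothetical):
+#   nodeConfig:
+#   - nodeName: "worker1"
+#     mainVppInterface:
+#       interfaceName: "GigabitEthernet0/9/0"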
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-agent-cfg
+  namespace: kube-system
+data:
+  contiv.conf: |-
+    useNoOverlay: false
+    useTAPInterfaces: true
+    tapInterfaceVersion: 2
+    tapv2RxRingSize: 256
+    tapv2TxRingSize: 256
+    tcpChecksumOffloadDisabled: true
+    STNVersion: 1
+    natExternalTraffic: true
+    mtuSize: 1450
+    scanIPNeighbors: true
+    ipNeighborScanInterval: 1
+    ipNeighborStaleThreshold: 4
+    enablePacketTrace: false
+    routeServiceCIDRToVPP: false
+    crdNodeConfigurationDisabled: true
+    ipamConfig:
+      nodeInterconnectDHCP: false
+      nodeInterconnectCIDR: 192.168.16.0/24
+      podSubnetCIDR: 10.1.0.0/16
+      podSubnetOneNodePrefixLen: 24
+      vppHostSubnetCIDR: 172.30.0.0/16
+      vppHostSubnetOneNodePrefixLen: 24
+      vxlanCIDR: 192.168.30.0/24
+  controller.conf: |
+    enableRetry: true
+    delayRetry: 1000000000
+    maxRetryAttempts: 3
+    enableExpBackoffRetry: true
+    delayLocalResync: 5000000000
+    startupResyncDeadline: 30000000000
+    enablePeriodicHealing: false
+    periodicHealingInterval: 30000000000
+    delayAfterErrorHealing: 5000000000
+    remoteDBProbingInterval: 3000000000
+    recordEventHistory: true
+    eventHistoryAgeLimit: 1440
+    permanentlyRecordedInitPeriod: 60
+  service.conf: |
+    cleanupIdleNATSessions: true
+    tcpNATSessionTimeout: 180
+    otherNATSessionTimeout: 5
+    serviceLocalEndpointWeight: 1
+    disableNATVirtualReassembly: false
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vpp-agent-cfg
+  namespace: kube-system
+data:
+  govpp.conf: |
+    health-check-probe-interval: 3000000000
+    health-check-reply-timeout: 500000000
+    health-check-threshold: 3
+    reply-timeout: 3000000000
+  logs.conf: |
+    default-level: debug
+    loggers:
+      - name: statscollector
+        level: info
+      - name: vpp.if-state
+        level: info
+      - name: linux.arp-conf
+        level: info
+      - name: vpp-rest
+        level: info
+  grpc.conf: |
+    network: unix
+    endpoint: /var/run/contiv/cni.sock
+    force-socket-removal: true
+    permission: 700
+  http.conf: |
+    endpoint: "0.0.0.0:9999"
+  bolt.conf: |
+    db-path: /var/bolt/bolt.db
+    file-mode: 432
+    lock-timeout: 0
+  telemetry.conf: |
+    polling-interval: 30000000000
+    disabled: true
+  linux-ifplugin.conf: |
+    dump-go-routines-count: 5
+  linux-l3plugin.conf: |
+    dump-go-routines-count: 5
+  kvscheduler.conf: |
+    record-transaction-history: true
+    transaction-history-age-limit: 1440
+    permanently-recorded-init-period: 60
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: contiv-cni-cfg
+  namespace: kube-system
+data:
+  # The CNI network configuration to install on each node. The special
+  # values in this config will be automatically populated.
+  10-contiv-vpp.conflist: |-
+    {
+      "name": "k8s-pod-network",
+      "cniVersion": "0.3.1",
+      "plugins": [
+        {
+          "type": "contiv-cni",
+          "grpcServer": "/var/run/contiv/cni.sock",
+          "logFile": "/var/run/contiv/cni.log"
+        },
+        {
+          "type": "portmap",
+          "capabilities": {
+              "portMappings": true
+          },
+          "externalSetMarkChain": "KUBE-MARK-MASQ"
+        }
+      ]
+    }
+---
+
+###########################################################
+#
+# !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
+#
+###########################################################
+
+
+###########################################################
+#  Components and other resources
+###########################################################
+
+# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
+# In order to dump the content of ETCD, you can use a kubectl exec command similar to this:
+#   kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  name: contiv-etcd-amd64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-etcd
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-etcd
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-etcd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: amd64
+      hostNetwork: true
+
+      containers:
+        - name: contiv-etcd
+          image: quay.io/coreos/etcd:v3.3.11
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: CONTIV_ETCD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: ETCDCTL_API
+              value: "3"
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
+              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
+              --listen-peer-urls=http://0.0.0.0:12380
+          volumeMounts:
+            - name: var-etcd
+              mountPath: /var/etcd/
+          livenessProbe:
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - |
+                  echo "$HOST_IP" | grep -q ':'
+                  if [ "$?" -eq "0" ];
+                  then
+                     HOST_IP="[$HOST_IP]"
+                  fi
+                  etcdctl get --endpoints=$HOST_IP:32379 /
+            periodSeconds: 3
+            initialDelaySeconds: 20
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        - name: var-etcd
+          hostPath:
+            path: /var/etcd
+
+---
+# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
+# In order to dump the content of ETCD, you can use a kubectl exec command similar to this:
+#   kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+  name: contiv-etcd-arm64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-etcd
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-etcd
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-etcd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: arm64
+      hostNetwork: true
+
+      containers:
+        - name: contiv-etcd
+          image: quay.io/coreos/etcd:v3.3.11-arm64
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: CONTIV_ETCD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: ETCDCTL_API
+              value: "3"
+            - name: ETCD_UNSUPPORTED_ARCH
+              value: "arm64"
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
+              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
+              --listen-peer-urls=http://0.0.0.0:12380
+          volumeMounts:
+            - name: var-etcd
+              mountPath: /var/etcd/
+          livenessProbe:
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - |
+                  echo "$HOST_IP" | grep -q ':'
+                  if [ "$?" -eq "0" ];
+                  then
+                     HOST_IP="[$HOST_IP]"
+                  fi
+                  etcdctl get --endpoints=$HOST_IP:32379 /
+            periodSeconds: 3
+            initialDelaySeconds: 20
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        - name: var-etcd
+          hostPath:
+            path: /var/etcd
+
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: contiv-etcd
+  namespace: kube-system
+spec:
+  type: NodePort
+  # Match contiv-etcd DaemonSet.
+  selector:
+    k8s-app: contiv-etcd
+  ports:
+    - port: 12379
+      nodePort: 32379
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-ksr-http-cfg
+  namespace: kube-system
+data:
+  http.conf: |
+    endpoint: "0.0.0.0:9191"
+
+---
+# This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-etcd-cfg
+  namespace: kube-system
+data:
+  etcd.conf: |
+    insecure-transport: true
+    dial-timeout: 10000000000
+    allow-delayed-start: true
+    endpoints:
+      - "__HOST_IP__:32379"
+
+---
+
+# This config map contains ETCD configuration for connecting to the contiv-etcd defined above with auto compact.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-etcd-withcompact-cfg
+  namespace: kube-system
+data:
+  etcd.conf: |
+    insecure-transport: true
+    dial-timeout: 10000000000
+    auto-compact: 600000000000
+    allow-delayed-start: true
+    reconnect-interval: 2000000000
+    endpoints:
+      - "__HOST_IP__:32379"
+
+---
+
+# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
+# It consists of the following containers:
+#   - contiv-vswitch container: contains VPP and its management agent
+#   - contiv-cni container: installs CNI on the host
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-vswitch-amd64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-vswitch
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-vswitch
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-vswitch
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      nodeSelector:
+        beta.kubernetes.io/arch: amd64
+      hostNetwork: true
+      hostPID: true
+
+      # Init containers are executed before regular containers and must finish successfully
+      # before the regular ones are started.
+      initContainers:
+        # This container installs the Contiv CNI binaries and CNI network config file on each node.
+        - name: contiv-cni
+          image: iecedge/cni:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: SLEEP
+              value: "false"
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /etc/cni/net.d
+              name: cni-net-dir
+            - mountPath: /cni/cfg
+              name: contiv-cni-cfg
+            - mountPath: /var/run/contiv
+              name: contiv-run
+
+        # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
+        - name: vpp-init
+          image: iecedge/vswitch:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              set -eu
+              chmod 700 /run/vpp
+              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
+              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
+                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
+              fi
+              if [ ! -d /var/run/contiv ]; then
+                  mkdir /var/run/contiv
+              fi
+              chmod 700 /var/run/contiv
+              rm -f /var/run/contiv/cni.sock
+              if ip link show vpp1 >/dev/null 2>&1; then
+                   ip link del vpp1
+              fi
+              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
+              sysctl -w debug.exception-trace=1
+              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
+              ulimit -c unlimited
+              echo 2 > /proc/sys/fs/suid_dumpable
+              # replace localhost IP by node IP since node port doesn't work
+              # on localhost IP in a certain scenario
+              cp /etc/etcd/etcd.conf /tmp/etcd.conf
+              set +e
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
+          resources: {}
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+            - name: vpp-cfg
+              mountPath: /host/etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: tmp
+              mountPath: /tmp
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+
+      containers:
+        # Runs contiv-vswitch container on each Kubernetes node.
+        # It contains the vSwitch VPP and its management agent.
+        - name: contiv-vswitch
+          image: iecedge/vswitch:v3.2.1
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            privileged: true
+          ports:
+            # readiness + liveness probe
+            - containerPort: 9999
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 15
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 60
+          env:
+            - name: MICROSERVICE_LABEL
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: GOVPPMUX_NOSOCK
+              value: "1"
+            - name: CONTIV_CONFIG
+              value: "/etc/contiv/contiv.conf"
+            - name: CONTROLLER_CONFIG
+              value: "/etc/contiv/controller.conf"
+            - name: SERVICE_CONFIG
+              value: "/etc/contiv/service.conf"
+            - name: ETCD_CONFIG
+              value: "/tmp/etcd.conf"
+            - name: BOLT_CONFIG
+              value: "/etc/vpp-agent/bolt.conf"
+            # Uncomment to log graph traversal (very verbose):
+            # - name: KVSCHED_LOG_GRAPH_WALK
+            #   value: "true"
+            # Uncomment to verify effect of every transaction:
+            # - name: KVSCHED_VERIFY_MODE
+            #   value: "true"
+            - name: TELEMETRY_CONFIG
+              value: "/etc/vpp-agent/telemetry.conf"
+            - name: GOVPP_CONFIG
+              value: "/etc/vpp-agent/govpp.conf"
+            - name: LOGS_CONFIG
+              value: "/etc/vpp-agent/logs.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/vpp-agent/http.conf"
+            - name: GRPC_CONFIG
+              value: "/etc/vpp-agent/grpc.conf"
+            - name: LINUX_IFPLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-ifplugin.conf"
+            - name: LINUX_L3PLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-l3plugin.conf"
+            - name: KVSCHEDULER_CONFIG
+              value: "/etc/vpp-agent/kvscheduler.conf"
+          volumeMounts:
+            - name: var-bolt
+              mountPath: /var/bolt
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: vpp-cfg
+              mountPath: /etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: dev
+              mountPath: /dev
+            - name: sys-bus-pci
+              mountPath: /sys/bus/pci
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: contiv-agent-cfg
+              mountPath: /etc/contiv
+            - name: vpp-agent-cfg
+              mountPath: /etc/vpp-agent
+            - name: tmp
+              mountPath: /tmp
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+            - name: docker-socket
+              mountPath: /var/run/docker.sock
+          resources:
+            requests:
+              cpu: 250m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+        # VPP startup config folder.
+        - name: vpp-cfg
+          hostPath:
+            path: /etc/vpp
+        # To install vppctl.
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication
+        # with VPP (/dev/shm)
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: shm
+          hostPath:
+            path: /dev/shm
+        # /sys/bus/pci is required for binding PCI devices to specific drivers
+        - name: sys-bus-pci
+          hostPath:
+            path: /sys/bus/pci
+        # For CLI unix socket.
+        - name: vpp-run
+          hostPath:
+            path: /run/vpp
+        # For CNI / STN unix domain socket
+        - name: contiv-run
+          hostPath:
+            path: /var/run/contiv
+        # Used to configure contiv agent.
+        - name: contiv-agent-cfg
+          configMap:
+            name: contiv-agent-cfg
+        # Used to configure vpp agent.
+        - name: vpp-agent-cfg
+          configMap:
+            name: vpp-agent-cfg
+        # Used for vswitch core dumps
+        - name: core-dumps
+          hostPath:
+            path: /var/contiv/dumps
+        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
+        - name: tmp
+          emptyDir:
+            medium: Memory
+        # persisted bolt data
+        - name: var-bolt
+          hostPath:
+            path: /var/bolt
+        - name: docker-socket
+          hostPath:
+            path: /var/run/docker.sock
+        # CNI config
+        - name: contiv-cni-cfg
+          configMap:
+            name: contiv-cni-cfg
+
+---
+# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
+# It consists of the following containers:
+#   - contiv-vswitch container: contains VPP and its management agent
+#   - contiv-cni container: installs CNI on the host
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-vswitch-arm64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-vswitch
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-vswitch
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-vswitch
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      nodeSelector:
+        beta.kubernetes.io/arch: arm64
+      hostNetwork: true
+      hostPID: true
+
+      # Init containers are executed before regular containers and must finish successfully
+      # before the regular ones are started.
+      initContainers:
+        # This container installs the Contiv CNI binaries and CNI network config file on each node.
+        - name: contiv-cni
+          image: iecedge/cni-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: SLEEP
+              value: "false"
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /etc/cni/net.d
+              name: cni-net-dir
+            - mountPath: /cni/cfg
+              name: contiv-cni-cfg
+            - mountPath: /var/run/contiv
+              name: contiv-run
+
+        # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
+        - name: vpp-init
+          image: iecedge/vswitch-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              set -eu
+              chmod 700 /run/vpp
+              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
+              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
+                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
+              fi
+              if [ ! -d /var/run/contiv ]; then
+                  mkdir /var/run/contiv
+              fi
+              chmod 700 /var/run/contiv
+              rm -f /var/run/contiv/cni.sock
+              if ip link show vpp1 >/dev/null 2>&1; then
+                   ip link del vpp1
+              fi
+              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
+              sysctl -w debug.exception-trace=1
+              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
+              ulimit -c unlimited
+              echo 2 > /proc/sys/fs/suid_dumpable
+              # replace localhost IP by node IP since node port doesn't work
+              # on localhost IP in a certain scenario
+              cp /etc/etcd/etcd.conf /tmp/etcd.conf
+              set +e
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
+          resources: {}
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+            - name: vpp-cfg
+              mountPath: /host/etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: tmp
+              mountPath: /tmp
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+
+      containers:
+        # Runs contiv-vswitch container on each Kubernetes node.
+        # It contains the vSwitch VPP and its management agent.
+        - name: contiv-vswitch
+          image: iecedge/vswitch-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            privileged: true
+          ports:
+            # readiness + liveness probe
+            - containerPort: 9999
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 15
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 60
+          env:
+            - name: MICROSERVICE_LABEL
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: GOVPPMUX_NOSOCK
+              value: "1"
+            - name: CONTIV_CONFIG
+              value: "/etc/contiv/contiv.conf"
+            - name: CONTROLLER_CONFIG
+              value: "/etc/contiv/controller.conf"
+            - name: SERVICE_CONFIG
+              value: "/etc/contiv/service.conf"
+            - name: ETCD_CONFIG
+              value: "/tmp/etcd.conf"
+            - name: BOLT_CONFIG
+              value: "/etc/vpp-agent/bolt.conf"
+            # Uncomment to log graph traversal (very verbose):
+            # - name: KVSCHED_LOG_GRAPH_WALK
+            #   value: "true"
+            # Uncomment to verify effect of every transaction:
+            # - name: KVSCHED_VERIFY_MODE
+            #   value: "true"
+            - name: TELEMETRY_CONFIG
+              value: "/etc/vpp-agent/telemetry.conf"
+            - name: GOVPP_CONFIG
+              value: "/etc/vpp-agent/govpp.conf"
+            - name: LOGS_CONFIG
+              value: "/etc/vpp-agent/logs.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/vpp-agent/http.conf"
+            - name: GRPC_CONFIG
+              value: "/etc/vpp-agent/grpc.conf"
+            - name: LINUX_IFPLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-ifplugin.conf"
+            - name: LINUX_L3PLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-l3plugin.conf"
+            - name: KVSCHEDULER_CONFIG
+              value: "/etc/vpp-agent/kvscheduler.conf"
+          volumeMounts:
+            - name: var-bolt
+              mountPath: /var/bolt
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: vpp-cfg
+              mountPath: /etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: dev
+              mountPath: /dev
+            - name: sys-bus-pci
+              mountPath: /sys/bus/pci
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: contiv-agent-cfg
+              mountPath: /etc/contiv
+            - name: vpp-agent-cfg
+              mountPath: /etc/vpp-agent
+            - name: tmp
+              mountPath: /tmp
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+            - name: docker-socket
+              mountPath: /var/run/docker.sock
+          resources:
+            requests:
+              cpu: 250m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+        # VPP startup config folder.
+        - name: vpp-cfg
+          hostPath:
+            path: /etc/vpp
+        # To install vppctl.
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with
+        # VPP (/dev/shm)
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: shm
+          hostPath:
+            path: /dev/shm
+        # /sys/bus/pci is required for binding PCI devices to specific drivers
+        - name: sys-bus-pci
+          hostPath:
+            path: /sys/bus/pci
+        # For CLI unix socket.
+        - name: vpp-run
+          hostPath:
+            path: /run/vpp
+        # For CNI / STN unix domain socket
+        - name: contiv-run
+          hostPath:
+            path: /var/run/contiv
+        # Used to configure contiv agent.
+        - name: contiv-agent-cfg
+          configMap:
+            name: contiv-agent-cfg
+        # Used to configure vpp agent.
+        - name: vpp-agent-cfg
+          configMap:
+            name: vpp-agent-cfg
+        # Used for vswitch core dumps
+        - name: core-dumps
+          hostPath:
+            path: /var/contiv/dumps
+        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
+        - name: tmp
+          emptyDir:
+            medium: Memory
+        # persisted bolt data
+        - name: var-bolt
+          hostPath:
+            path: /var/bolt
+        - name: docker-socket
+          hostPath:
+            path: /var/run/docker.sock
+        # CNI config
+        - name: contiv-cni-cfg
+          configMap:
+            name: contiv-cni-cfg
+
+---
+# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-ksr-amd64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ksr
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ksr
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: amd64
+      hostNetwork: true
+      # This grants the required permissions to contiv-ksr.
+      serviceAccountName: contiv-ksr
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+
+      containers:
+        - name: contiv-ksr
+          image: iecedge/ksr:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-withcompact-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+        - name: http-cfg
+          configMap:
+            name: contiv-ksr-http-cfg
+
+---
+# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-ksr-arm64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ksr
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ksr
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: arm64
+      hostNetwork: true
+      # This grants the required permissions to contiv-ksr.
+      serviceAccountName: contiv-ksr
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: arm64v8/busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+
+      containers:
+        - name: contiv-ksr
+          image: iecedge/ksr-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-withcompact-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+        - name: http-cfg
+          configMap:
+            name: contiv-ksr-http-cfg
+
+---
+
+# This cluster role defines a set of permissions required for contiv-ksr.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: contiv-ksr
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+      - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - services
+      - endpoints
+      - nodes
+    verbs:
+      - watch
+      - list
+
+---
+
+# This defines a service account for contiv-ksr.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: contiv-ksr
+  namespace: kube-system
+
+---
+
+# This binds the contiv-ksr cluster role with contiv-ksr service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: contiv-ksr
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: contiv-ksr
+subjects:
+  - kind: ServiceAccount
+    name: contiv-ksr
+    namespace: kube-system
+
+---
+
+# This installs the contiv-crd on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-crd-amd64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-crd
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-crd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: amd64
+      hostNetwork: true
+      # This grants the required permissions to contiv-crd.
+      serviceAccountName: contiv-crd
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+        # This init container copies contiv-netctl tool to the host.
+        - name: netctl-init
+          image: iecedge/crd:v3.2.1
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              echo '#!/bin/sh
+              kubectl get pods -n kube-system | \
+                grep contiv-crd | \
+                cut -d " " -f 1 | \
+                xargs -I{} kubectl exec -n kube-system {} \
+                /contiv-netctl "$@"' \
+              > /host/usr/local/bin/contiv-netctl || true
+              chmod +x /host/usr/local/bin/contiv-netctl || true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+
+      containers:
+        - name: contiv-crd
+          image: iecedge/crd:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+            - name: HTTP_CLIENT_CONFIG
+              value: "/etc/http/http.client.conf"
+            - name: CONTIV_CRD_VALIDATE_INTERVAL
+              value: "5"
+            - name: CONTIV_CRD_VALIDATE_STATE
+              value: "SB"
+            - name: DISABLE_NETCTL_REST
+              value: "true"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        - name: http-cfg
+          configMap:
+            name: contiv-crd-http-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+---
+# This installs the contiv-crd on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-crd-arm64
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-crd
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-crd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/arch: arm64
+      hostNetwork: true
+      # This grants the required permissions to contiv-crd.
+      serviceAccountName: contiv-crd
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: arm64v8/busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+        # This init container copies contiv-netctl tool to the host.
+        - name: netctl-init
+          image: iecedge/crd-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              echo '#!/bin/sh
+              kubectl get pods -n kube-system | \
+                grep contiv-crd | \
+                cut -d " " -f 1 | \
+                xargs -I{} kubectl exec -n kube-system {} \
+                /contiv-netctl "$@"' \
+              > /host/usr/local/bin/contiv-netctl || true
+              chmod +x /host/usr/local/bin/contiv-netctl || true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+
+      containers:
+        - name: contiv-crd
+          image: iecedge/crd-arm64:v3.2.1
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+            - name: HTTP_CLIENT_CONFIG
+              value: "/etc/http/http.client.conf"
+            - name: CONTIV_CRD_VALIDATE_INTERVAL
+              value: "5"
+            - name: CONTIV_CRD_VALIDATE_STATE
+              value: "SB"
+            - name: DISABLE_NETCTL_REST
+              value: "true"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        - name: http-cfg
+          configMap:
+            name: contiv-crd-http-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+---
+
+# This cluster role defines a set of permissions required for contiv-crd.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: contiv-crd
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - apiextensions.k8s.io
+      - nodeconfig.contiv.vpp
+      - telemetry.contiv.vpp
+    resources:
+      - customresourcedefinitions
+      - telemetryreports
+      - nodeconfigs
+    verbs:
+      - "*"
+
+---
+
+# This defines a service account for contiv-crd.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: contiv-crd
+  namespace: kube-system
+
+---
+
+# This binds the contiv-crd cluster role with contiv-crd service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: contiv-crd
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: contiv-crd
+subjects:
+  - kind: ServiceAccount
+    name: contiv-crd
+    namespace: kube-system
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-crd-http-cfg
+  namespace: kube-system
+data:
+  http.conf: |
+    endpoint: "0.0.0.0:9090"
+  http.client.conf: |
+    port: 9999
+    use-https: false
diff --git a/src/foundation/scripts/cni/contivpp/contiv-vswitch.conf b/src/foundation/scripts/cni/contivpp/contiv-vswitch.conf
new file mode 100644 (file)
index 0000000..4bebadc
--- /dev/null
@@ -0,0 +1,26 @@
+unix {
+    nodaemon
+    cli-listen /run/vpp/cli.sock
+    cli-no-pager
+    poll-sleep-usec 100
+}
+nat {
+    endpoint-dependent
+    translation hash buckets 1048576
+    translation hash memory 268435456
+    user hash buckets 1024
+    max translations per user 10000
+}
+acl-plugin {
+    use tuple merge 0
+}
+api-trace {
+    on
+    nitems 5000
+}
+socksvr {
+   default
+}
+statseg {
+   default
+}
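+# Note: contiv-update-config.sh appends a dpdk stanza for the selected NIC at install time,
+# e.g. (PCI address illustrative):
+# dpdk {
+#     dev 0000:89:00.0
+# }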
diff --git a/src/foundation/scripts/config b/src/foundation/scripts/config
index a1ad323..f4c27b0 100755 (executable)
@@ -29,9 +29,17 @@ K8S_WORKER_GROUP=(
 # K8s parameter
 CLUSTER_IP=172.16.1.136 # Align with the value in our K8s setup script
 POD_NETWORK_CIDR=192.168.0.0/16
-#CNI type: flannel/calico/contivvpp
+# IEC supports three network solutions for Kubernetes: calico, flannel, contivpp
 CNI_TYPE=calico
 #kubernetes-cni version 0.7.5/ 0.6.0
 CNI_VERSION=0.6.0
 #kubernetes version: 1.15.2/ 1.13.0
 KUBE_VERSION=1.13.0
+
+# DEV_NAME is an associative array that lists the network interface device names used by contivpp.
+# Use the IP address of each K8S_WORKER_GROUP entry as the key, for example:
+#  DEV_NAME=(
+#  [10.169.40.106]="enp137s0f0"
+#  )
+declare -A DEV_NAME
+DEV_NAME=()
diff --git a/src/foundation/scripts/setup-cni.sh b/src/foundation/scripts/setup-cni.sh
index b383b97..d1b27bc 100755 (executable)
@@ -11,6 +11,7 @@ fi
 CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
 POD_NETWORK_CIDR=${2:-192.168.0.0/16}
 CNI_TYPE=${3:-calico}
+DEV_NAME=${4:-}
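+# Example invocation (values are the script defaults plus an illustrative NIC name):
+#   ./setup-cni.sh 172.16.1.136 192.168.0.0/16 contivpp enp137s0f0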
 
 SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
 
@@ -36,9 +37,13 @@ install_flannel(){
   kubectl apply -f "${SCRIPTS_DIR}/cni/flannel/kube-flannel.yml"
 }
 
-install_contiv(){
-  # Install the Contiv-vpp
-  echo "World peach!!!!!!!!!!!!!!"
+install_contivpp(){
+  # Update vpp config file
+  ${SCRIPTS_DIR}/cni/contivpp/contiv-update-config.sh $DEV_NAME
+
+  # Install contivpp CNI
+  sed -i "s@10.1.0.0/16@${POD_NETWORK_CIDR}@" "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp.yaml"
+  kubectl apply -f "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp.yaml"
 }
 
 case ${CNI_TYPE} in
@@ -50,12 +55,12 @@ case ${CNI_TYPE} in
         echo "Install flannel ..."
         install_flannel
         ;;
- 'contivvpp')
-        echo "Install Contiv-vpp ..."
-        install_contiv
+ 'contivpp')
+        echo "Install Contiv-VPP ..."
+        install_contivpp
         ;;
  *)
-        echo "${CNI_TYPE} does not supportted"
+        echo "${CNI_TYPE} is not supported"
         exit 1
         ;;
 esac
diff --git a/src/foundation/scripts/startup.sh b/src/foundation/scripts/startup.sh
index 123039d..9aeda20 100755 (executable)
@@ -71,7 +71,12 @@ deploy_k8s () {
     sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} ${INSTALL_SOFTWARE}
     sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} "echo \"sudo ${KUBEADM_JOIN_CMD}\" >> ./iec/src/foundation/scripts/k8s_worker.sh"
     sleep 2
-    sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} "swapon -a"
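+    # For contivpp, push the per-node NIC configuration (DEV_NAME keyed by worker IP, see
+    # src/foundation/scripts/config) to each worker before it joins the cluster.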
+    if [ -n "${CNI_TYPE}" ] && [ "${CNI_TYPE}" == "contivpp" ] && [ -n "${DEV_NAME[$ip_addr]}" ]
+    then
+      CONTIVPP_CONFIG="cd iec/src/foundation/scripts/cni/contivpp && sudo ./contiv-update-config.sh ${DEV_NAME[$ip_addr]}"
+      sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} $CONTIVPP_CONFIG
+    fi
+    sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} "sudo swapon -a"
     sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} ${SETUP_WORKER}
 
   done