--- /dev/null
+---
+# Source: contiv-vpp/templates/vpp.yaml
+# Contiv-VPP deployment YAML file. This deploys Contiv-VPP networking on a Kubernetes cluster.
+# The deployment consists of the following components:
+# - contiv-etcd - deployed on k8s master
+# - contiv-vswitch - deployed on each k8s node
+# - contiv-ksr - deployed on k8s master
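+#
+# To deploy, apply the rendered manifest with kubectl, for example (the file name is
+# illustrative; use wherever the chart output was saved):
+#   kubectl apply -f contiv-vpp.yaml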
+
+###########################################################
+# Configuration
+###########################################################
+
+# This config map contains the contiv-agent configuration. The most important part is the ipamConfig,
+# which may need to be updated if the default IPAM settings do not match your needs.
+# nodeConfig may be used if your nodes have more than one VPP interface. In that case, one of them
+# must be marked as the main inter-node interface, and the rest can be configured with arbitrary
+# IP addresses (these must not conflict with the main IPAM configuration).
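+#
+# For such multi-interface nodes, a nodeConfig section can be added to contiv.conf. The snippet
+# below is only an illustrative, hypothetical sketch -- the field names and values are assumptions
+# and should be checked against the Contiv-VPP documentation before use:
+#
+#   nodeConfig:
+#   - nodeName: "k8s-worker1"
+#     mainVppInterface:
+#       interfaceName: "GigabitEthernet0/8/0"
+#     otherVppInterfaces:
+#     - interfaceName: "GigabitEthernet0/9/0"
+#       ip: "10.10.10.1/24"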
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contiv-agent-cfg
+ namespace: kube-system
+data:
+ contiv.conf: |-
+ useNoOverlay: false
+ useTAPInterfaces: true
+ tapInterfaceVersion: 2
+ tapv2RxRingSize: 256
+ tapv2TxRingSize: 256
+ tcpChecksumOffloadDisabled: true
+ STNVersion: 1
+ natExternalTraffic: true
+ mtuSize: 1450
+ scanIPNeighbors: true
+ ipNeighborScanInterval: 1
+ ipNeighborStaleThreshold: 4
+ enablePacketTrace: false
+ routeServiceCIDRToVPP: false
+ crdNodeConfigurationDisabled: true
+ ipamConfig:
+ nodeInterconnectDHCP: false
+ nodeInterconnectCIDR: 192.168.16.0/24
+ podSubnetCIDR: 10.1.0.0/16
+ podSubnetOneNodePrefixLen: 24
+ vppHostSubnetCIDR: 172.30.0.0/16
+ vppHostSubnetOneNodePrefixLen: 24
+ vxlanCIDR: 192.168.30.0/24
+ controller.conf: |
+ enableRetry: true
+ delayRetry: 1000000000
+ maxRetryAttempts: 3
+ enableExpBackoffRetry: true
+ delayLocalResync: 5000000000
+ startupResyncDeadline: 30000000000
+ enablePeriodicHealing: false
+ periodicHealingInterval: 30000000000
+ delayAfterErrorHealing: 5000000000
+ remoteDBProbingInterval: 3000000000
+ recordEventHistory: true
+ eventHistoryAgeLimit: 1440
+ permanentlyRecordedInitPeriod: 60
+ service.conf: |
+ cleanupIdleNATSessions: true
+ tcpNATSessionTimeout: 180
+ otherNATSessionTimeout: 5
+ serviceLocalEndpointWeight: 1
+ disableNATVirtualReassembly: false
+
+---
+
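+# This config map contains the vpp-agent configuration consumed by the contiv-vswitch container.
+# The interval/timeout values (e.g. health-check-probe-interval: 3000000000) appear to be Go
+# durations expressed in nanoseconds, i.e. 3000000000 = 3 s (an assumption based on their magnitude).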
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: vpp-agent-cfg
+ namespace: kube-system
+data:
+ govpp.conf: |
+ health-check-probe-interval: 3000000000
+ health-check-reply-timeout: 500000000
+ health-check-threshold: 3
+ reply-timeout: 3000000000
+ logs.conf: |
+ default-level: debug
+ loggers:
+ - name: statscollector
+ level: info
+ - name: vpp.if-state
+ level: info
+ - name: linux.arp-conf
+ level: info
+ - name: vpp-rest
+ level: info
+ grpc.conf: |
+ network: unix
+ endpoint: /var/run/contiv/cni.sock
+ force-socket-removal: true
+ permission: 700
+ http.conf: |
+ endpoint: "0.0.0.0:9999"
+ bolt.conf: |
+ db-path: /var/bolt/bolt.db
+ file-mode: 432
+ lock-timeout: 0
+ telemetry.conf: |
+ polling-interval: 30000000000
+ disabled: true
+ linux-ifplugin.conf: |
+ dump-go-routines-count: 5
+ linux-l3plugin.conf: |
+ dump-go-routines-count: 5
+ kvscheduler.conf: |
+ record-transaction-history: true
+ transaction-history-age-limit: 1440
+ permanently-recorded-init-period: 60
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: contiv-cni-cfg
+ namespace: kube-system
+data:
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
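+ # The contiv-cni plugin forwards CNI requests to the contiv-agent over the gRPC socket
+ # /var/run/contiv/cni.sock (configured in grpc.conf above); the chained portmap plugin
+ # handles hostPort mappings for pods.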
+ 10-contiv-vpp.conflist: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "contiv-cni",
+ "grpcServer": "/var/run/contiv/cni.sock",
+ "logFile": "/var/run/contiv/cni.log"
+ },
+ {
+ "type": "portmap",
+ "capabilities": {
+ "portMappings": true
+ },
+ "externalSetMarkChain": "KUBE-MARK-MASQ"
+ }
+ ]
+ }
+---
+
+###########################################################
+#
+# !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
+#
+###########################################################
+
+
+###########################################################
+# Components and other resources
+###########################################################
+
+# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
+# In order to dump the content of ETCD, you can use a kubectl exec command similar to this:
+# kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+ name: contiv-etcd-amd64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-etcd
+spec:
+ selector:
+ matchLabels:
+ k8s-app: contiv-etcd
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-etcd
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: amd64
+ hostNetwork: true
+
+ containers:
+ - name: contiv-etcd
+ image: quay.io/coreos/etcd:v3.3.11
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: CONTIV_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: ETCDCTL_API
+ value: "3"
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
+ --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
+ --listen-peer-urls=http://0.0.0.0:12380
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd/
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ etcdctl get --endpoints=$HOST_IP:32379 /
+ periodSeconds: 3
+ initialDelaySeconds: 20
+ resources:
+ requests:
+ cpu: 100m
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
+# In order to dump the content of ETCD, you can use a kubectl exec command similar to this:
+# kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
+apiVersion: apps/v1beta2
+kind: DaemonSet
+metadata:
+ name: contiv-etcd-arm64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-etcd
+spec:
+ selector:
+ matchLabels:
+ k8s-app: contiv-etcd
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-etcd
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: arm64
+ hostNetwork: true
+
+ containers:
+ - name: contiv-etcd
+ image: quay.io/coreos/etcd:v3.3.11-arm64
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: CONTIV_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: ETCDCTL_API
+ value: "3"
+ - name: ETCD_UNSUPPORTED_ARCH
+ value: "arm64"
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
+ --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
+ --listen-peer-urls=http://0.0.0.0:12380
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd/
+ livenessProbe:
+ exec:
+ command:
+ - /bin/sh
+ - -c
+ - |
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ etcdctl get --endpoints=$HOST_IP:32379 /
+ periodSeconds: 3
+ initialDelaySeconds: 20
+ resources:
+ requests:
+ cpu: 100m
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
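+# This exposes contiv-etcd as a NodePort service: the etcd client port 12379 is published as
+# NodePort 32379 on every node, which is how the agents reach etcd at <node-IP>:32379
+# (see the etcd.conf config maps below).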
+apiVersion: v1
+kind: Service
+metadata:
+ name: contiv-etcd
+ namespace: kube-system
+spec:
+ type: NodePort
+ # Match contiv-etcd DaemonSet.
+ selector:
+ k8s-app: contiv-etcd
+ ports:
+ - port: 12379
+ nodePort: 32379
+
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contiv-ksr-http-cfg
+ namespace: kube-system
+data:
+ http.conf: |
+ endpoint: "0.0.0.0:9191"
+
+---
+# This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
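+# The __HOST_IP__ placeholder is substituted with the node's IP address at runtime by the
+# vpp-init and wait-foretcd init containers (see the sed commands in their scripts below).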
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contiv-etcd-cfg
+ namespace: kube-system
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 10000000000
+ allow-delayed-start: true
+ endpoints:
+ - "__HOST_IP__:32379"
+
+---
+
+# This config map contains the ETCD configuration for connecting to the contiv-etcd defined above, with automatic compaction enabled.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contiv-etcd-withcompact-cfg
+ namespace: kube-system
+data:
+ etcd.conf: |
+ insecure-transport: true
+ dial-timeout: 10000000000
+ auto-compact: 600000000000
+ allow-delayed-start: true
+ reconnect-interval: 2000000000
+ endpoints:
+ - "__HOST_IP__:32379"
+
+---
+
+# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
+# It consists of the following containers:
+# - contiv-vswitch container: contains VPP and its management agent
+# - contiv-cni container: installs CNI on the host
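+#
+# The vpp-init init container also copies the vppctl binary to /usr/local/bin on the host, so the
+# VPP CLI can be inspected directly on a node, e.g. with "sudo vppctl show interface" (the exact
+# invocation may depend on where the VPP CLI socket is exposed; see the vpp-run volume below).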
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-vswitch-amd64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-vswitch
+spec:
+ selector:
+ matchLabels:
+ k8s-app: contiv-vswitch
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-vswitch
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ nodeSelector:
+ beta.kubernetes.io/arch: amd64
+ hostNetwork: true
+ hostPID: true
+
+ # Init containers are executed before regular containers and must finish successfully before
+ # the regular ones are started.
+ initContainers:
+ # This container installs the Contiv CNI binaries and CNI network config file on each node.
+ - name: contiv-cni
+ image: iecedge/cni:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /etc/cni/net.d
+ name: cni-net-dir
+ - mountPath: /cni/cfg
+ name: contiv-cni-cfg
+ - mountPath: /var/run/contiv
+ name: contiv-run
+
+ # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
+ - name: vpp-init
+ image: iecedge/vswitch:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ set -eu
+ chmod 700 /run/vpp
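+ # remove stale VPP shared-memory files possibly left over from a previous run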
+ rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
+ if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
+ cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
+ fi
+ if [ ! -d /var/run/contiv ]; then
+ mkdir /var/run/contiv
+ fi
+ chmod 700 /var/run/contiv
+ rm -f /var/run/contiv/cni.sock
+ if ip link show vpp1 >/dev/null 2>&1; then
+ ip link del vpp1
+ fi
+ cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
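+ # the following settings enable kernel core dumps so VPP crashes leave post-mortem files in /var/contiv/dumps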
+ sysctl -w debug.exception-trace=1
+ sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
+ ulimit -c unlimited
+ echo 2 > /proc/sys/fs/suid_dumpable
+ # replace the localhost IP with the node IP, since the etcd NodePort is not
+ # always reachable via the localhost IP
+ cp /etc/etcd/etcd.conf /tmp/etcd.conf
+ set +e
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: usr-local-bin
+ mountPath: /host/usr/local/bin
+ - name: vpp-cfg
+ mountPath: /host/etc/vpp
+ - name: shm
+ mountPath: /dev/shm
+ - name: vpp-run
+ mountPath: /run/vpp
+ - name: contiv-run
+ mountPath: /var/run/contiv
+ - name: tmp
+ mountPath: /tmp
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+ - name: core-dumps
+ mountPath: /var/contiv/dumps
+
+ containers:
+ # Runs contiv-vswitch container on each Kubernetes node.
+ # It contains the vSwitch VPP and its management agent.
+ - name: contiv-vswitch
+ image: iecedge/vswitch:v3.2.1
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ # readiness + liveness probe
+ - containerPort: 9999
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9999
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 15
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9999
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 60
+ env:
+ - name: MICROSERVICE_LABEL
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: GOVPPMUX_NOSOCK
+ value: "1"
+ - name: CONTIV_CONFIG
+ value: "/etc/contiv/contiv.conf"
+ - name: CONTROLLER_CONFIG
+ value: "/etc/contiv/controller.conf"
+ - name: SERVICE_CONFIG
+ value: "/etc/contiv/service.conf"
+ - name: ETCD_CONFIG
+ value: "/tmp/etcd.conf"
+ - name: BOLT_CONFIG
+ value: "/etc/vpp-agent/bolt.conf"
+ # Uncomment to log graph traversal (very verbose):
+ # - name: KVSCHED_LOG_GRAPH_WALK
+ # value: "true"
+ # Uncomment to verify effect of every transaction:
+ # - name: KVSCHED_VERIFY_MODE
+ # value: "true"
+ - name: TELEMETRY_CONFIG
+ value: "/etc/vpp-agent/telemetry.conf"
+ - name: GOVPP_CONFIG
+ value: "/etc/vpp-agent/govpp.conf"
+ - name: LOGS_CONFIG
+ value: "/etc/vpp-agent/logs.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/vpp-agent/http.conf"
+ - name: GRPC_CONFIG
+ value: "/etc/vpp-agent/grpc.conf"
+ - name: LINUX_IFPLUGIN_CONFIG
+ value: "/etc/vpp-agent/linux-ifplugin.conf"
+ - name: LINUX_L3PLUGIN_CONFIG
+ value: "/etc/vpp-agent/linux-l3plugin.conf"
+ - name: KVSCHEDULER_CONFIG
+ value: "/etc/vpp-agent/kvscheduler.conf"
+ volumeMounts:
+ - name: var-bolt
+ mountPath: /var/bolt
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+ - name: vpp-cfg
+ mountPath: /etc/vpp
+ - name: shm
+ mountPath: /dev/shm
+ - name: dev
+ mountPath: /dev
+ - name: sys-bus-pci
+ mountPath: /sys/bus/pci
+ - name: vpp-run
+ mountPath: /run/vpp
+ - name: contiv-run
+ mountPath: /var/run/contiv
+ - name: contiv-agent-cfg
+ mountPath: /etc/contiv
+ - name: vpp-agent-cfg
+ mountPath: /etc/vpp-agent
+ - name: tmp
+ mountPath: /tmp
+ - name: core-dumps
+ mountPath: /var/contiv/dumps
+ - name: docker-socket
+ mountPath: /var/run/docker.sock
+ resources:
+ requests:
+ cpu: 250m
+
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-cfg
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # VPP startup config folder.
+ - name: vpp-cfg
+ hostPath:
+ path: /etc/vpp
+ # To install vppctl.
+ - name: usr-local-bin
+ hostPath:
+ path: /usr/local/bin
+ # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication
+ # with VPP (/dev/shm)
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: shm
+ hostPath:
+ path: /dev/shm
+ # /sys/bus/pci is required for binding PCI devices to specific drivers
+ - name: sys-bus-pci
+ hostPath:
+ path: /sys/bus/pci
+ # For CLI unix socket.
+ - name: vpp-run
+ hostPath:
+ path: /run/vpp
+ # For CNI / STN unix domain socket
+ - name: contiv-run
+ hostPath:
+ path: /var/run/contiv
+ # Used to configure contiv agent.
+ - name: contiv-agent-cfg
+ configMap:
+ name: contiv-agent-cfg
+ # Used to configure vpp agent.
+ - name: vpp-agent-cfg
+ configMap:
+ name: vpp-agent-cfg
+ # Used for vswitch core dumps
+ - name: core-dumps
+ hostPath:
+ path: /var/contiv/dumps
+ # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
+ - name: tmp
+ emptyDir:
+ medium: Memory
+ # persisted bolt data
+ - name: var-bolt
+ hostPath:
+ path: /var/bolt
+ - name: docker-socket
+ hostPath:
+ path: /var/run/docker.sock
+ # CNI config
+ - name: contiv-cni-cfg
+ configMap:
+ name: contiv-cni-cfg
+
+---
+# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
+# It consists of the following containers:
+# - contiv-vswitch container: contains VPP and its management agent
+# - contiv-cni container: installs CNI on the host
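+#
+# The vpp-init init container also copies the vppctl binary to /usr/local/bin on the host, so the
+# VPP CLI can be inspected directly on a node, e.g. with "sudo vppctl show interface" (the exact
+# invocation may depend on where the VPP CLI socket is exposed; see the vpp-run volume below).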
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-vswitch-arm64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-vswitch
+spec:
+ selector:
+ matchLabels:
+ k8s-app: contiv-vswitch
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-vswitch
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ nodeSelector:
+ beta.kubernetes.io/arch: arm64
+ hostNetwork: true
+ hostPID: true
+
+ # Init containers are executed before regular containers and must finish successfully before
+ # the regular ones are started.
+ initContainers:
+ # This container installs the Contiv CNI binaries and CNI network config file on each node.
+ - name: contiv-cni
+ image: iecedge/cni-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /etc/cni/net.d
+ name: cni-net-dir
+ - mountPath: /cni/cfg
+ name: contiv-cni-cfg
+ - mountPath: /var/run/contiv
+ name: contiv-run
+
+ # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
+ - name: vpp-init
+ image: iecedge/vswitch-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ set -eu
+ chmod 700 /run/vpp
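+ # remove stale VPP shared-memory files possibly left over from a previous run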
+ rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
+ if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
+ cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
+ fi
+ if [ ! -d /var/run/contiv ]; then
+ mkdir /var/run/contiv
+ fi
+ chmod 700 /var/run/contiv
+ rm -f /var/run/contiv/cni.sock
+ if ip link show vpp1 >/dev/null 2>&1; then
+ ip link del vpp1
+ fi
+ cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
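+ # the following settings enable kernel core dumps so VPP crashes leave post-mortem files in /var/contiv/dumps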
+ sysctl -w debug.exception-trace=1
+ sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
+ ulimit -c unlimited
+ echo 2 > /proc/sys/fs/suid_dumpable
+ # replace the localhost IP with the node IP, since the etcd NodePort is not
+ # always reachable via the localhost IP
+ cp /etc/etcd/etcd.conf /tmp/etcd.conf
+ set +e
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
+ resources: {}
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: usr-local-bin
+ mountPath: /host/usr/local/bin
+ - name: vpp-cfg
+ mountPath: /host/etc/vpp
+ - name: shm
+ mountPath: /dev/shm
+ - name: vpp-run
+ mountPath: /run/vpp
+ - name: contiv-run
+ mountPath: /var/run/contiv
+ - name: tmp
+ mountPath: /tmp
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+ - name: core-dumps
+ mountPath: /var/contiv/dumps
+
+ containers:
+ # Runs contiv-vswitch container on each Kubernetes node.
+ # It contains the vSwitch VPP and its management agent.
+ - name: contiv-vswitch
+ image: iecedge/vswitch-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ securityContext:
+ privileged: true
+ ports:
+ # readiness + liveness probe
+ - containerPort: 9999
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9999
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 15
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9999
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 60
+ env:
+ - name: MICROSERVICE_LABEL
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: GOVPPMUX_NOSOCK
+ value: "1"
+ - name: CONTIV_CONFIG
+ value: "/etc/contiv/contiv.conf"
+ - name: CONTROLLER_CONFIG
+ value: "/etc/contiv/controller.conf"
+ - name: SERVICE_CONFIG
+ value: "/etc/contiv/service.conf"
+ - name: ETCD_CONFIG
+ value: "/tmp/etcd.conf"
+ - name: BOLT_CONFIG
+ value: "/etc/vpp-agent/bolt.conf"
+ # Uncomment to log graph traversal (very verbose):
+ # - name: KVSCHED_LOG_GRAPH_WALK
+ # value: "true"
+ # Uncomment to verify effect of every transaction:
+ # - name: KVSCHED_VERIFY_MODE
+ # value: "true"
+ - name: TELEMETRY_CONFIG
+ value: "/etc/vpp-agent/telemetry.conf"
+ - name: GOVPP_CONFIG
+ value: "/etc/vpp-agent/govpp.conf"
+ - name: LOGS_CONFIG
+ value: "/etc/vpp-agent/logs.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/vpp-agent/http.conf"
+ - name: GRPC_CONFIG
+ value: "/etc/vpp-agent/grpc.conf"
+ - name: LINUX_IFPLUGIN_CONFIG
+ value: "/etc/vpp-agent/linux-ifplugin.conf"
+ - name: LINUX_L3PLUGIN_CONFIG
+ value: "/etc/vpp-agent/linux-l3plugin.conf"
+ - name: KVSCHEDULER_CONFIG
+ value: "/etc/vpp-agent/kvscheduler.conf"
+ volumeMounts:
+ - name: var-bolt
+ mountPath: /var/bolt
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+ - name: vpp-cfg
+ mountPath: /etc/vpp
+ - name: shm
+ mountPath: /dev/shm
+ - name: dev
+ mountPath: /dev
+ - name: sys-bus-pci
+ mountPath: /sys/bus/pci
+ - name: vpp-run
+ mountPath: /run/vpp
+ - name: contiv-run
+ mountPath: /var/run/contiv
+ - name: contiv-agent-cfg
+ mountPath: /etc/contiv
+ - name: vpp-agent-cfg
+ mountPath: /etc/vpp-agent
+ - name: tmp
+ mountPath: /tmp
+ - name: core-dumps
+ mountPath: /var/contiv/dumps
+ - name: docker-socket
+ mountPath: /var/run/docker.sock
+ resources:
+ requests:
+ cpu: 250m
+
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-cfg
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # VPP startup config folder.
+ - name: vpp-cfg
+ hostPath:
+ path: /etc/vpp
+ # To install vppctl.
+ - name: usr-local-bin
+ hostPath:
+ path: /usr/local/bin
+ # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with
+ # VPP (/dev/shm)
+ - name: dev
+ hostPath:
+ path: /dev
+ - name: shm
+ hostPath:
+ path: /dev/shm
+ # /sys/bus/pci is required for binding PCI devices to specific drivers
+ - name: sys-bus-pci
+ hostPath:
+ path: /sys/bus/pci
+ # For CLI unix socket.
+ - name: vpp-run
+ hostPath:
+ path: /run/vpp
+ # For CNI / STN unix domain socket
+ - name: contiv-run
+ hostPath:
+ path: /var/run/contiv
+ # Used to configure contiv agent.
+ - name: contiv-agent-cfg
+ configMap:
+ name: contiv-agent-cfg
+ # Used to configure vpp agent.
+ - name: vpp-agent-cfg
+ configMap:
+ name: vpp-agent-cfg
+ # Used for vswitch core dumps
+ - name: core-dumps
+ hostPath:
+ path: /var/contiv/dumps
+ # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
+ - name: tmp
+ emptyDir:
+ medium: Memory
+ # persisted bolt data
+ - name: var-bolt
+ hostPath:
+ path: /var/bolt
+ - name: docker-socket
+ hostPath:
+ path: /var/run/docker.sock
+ # CNI config
+ - name: contiv-cni-cfg
+ configMap:
+ name: contiv-cni-cfg
+
+---
+# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
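+# It watches Kubernetes resources (pods, namespaces, network policies, services, endpoints and
+# nodes, per the contiv-ksr cluster role below) and reflects their state into contiv-etcd for the
+# vswitch agents to consume.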
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-ksr-amd64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-ksr
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-ksr
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: amd64
+ hostNetwork: true
+ # This grants the required permissions to contiv-ksr.
+ serviceAccountName: contiv-ksr
+
+ initContainers:
+ # This init container waits until etcd is started
+ - name: wait-foretcd
+ env:
+ - name: ETCDPORT
+ value: "32379"
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ image: busybox:1.29.3
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+ until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+
+
+ containers:
+ - name: contiv-ksr
+ image: iecedge/ksr:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ETCD_CONFIG
+ value: "/tmp/cfg/etcd.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/http/http.conf"
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: http-cfg
+ mountPath: /etc/http
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ resources:
+ requests:
+ cpu: 100m
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-withcompact-cfg
+ - name: tmp-cfg
+ emptyDir: {}
+ - name: http-cfg
+ configMap:
+ name: contiv-ksr-http-cfg
+
+---
+# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
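+# It watches Kubernetes resources (pods, namespaces, network policies, services, endpoints and
+# nodes, per the contiv-ksr cluster role below) and reflects their state into contiv-etcd for the
+# vswitch agents to consume.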
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-ksr-arm64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-ksr
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-ksr
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: arm64
+ hostNetwork: true
+ # This grants the required permissions to contiv-ksr.
+ serviceAccountName: contiv-ksr
+
+ initContainers:
+ # This init container waits until etcd is started
+ - name: wait-foretcd
+ env:
+ - name: ETCDPORT
+ value: "32379"
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ image: arm64v8/busybox:1.29.3
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+ until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+
+
+ containers:
+ - name: contiv-ksr
+ image: iecedge/ksr-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ETCD_CONFIG
+ value: "/tmp/cfg/etcd.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/http/http.conf"
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: http-cfg
+ mountPath: /etc/http
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9191
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9191
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ resources:
+ requests:
+ cpu: 100m
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-withcompact-cfg
+ - name: tmp-cfg
+ emptyDir: {}
+ - name: http-cfg
+ configMap:
+ name: contiv-ksr-http-cfg
+
+---
+
+# This cluster role defines a set of permissions required for contiv-ksr.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: contiv-ksr
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - services
+ - endpoints
+ - nodes
+ verbs:
+ - watch
+ - list
+
+---
+
+# This defines a service account for contiv-ksr.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: contiv-ksr
+ namespace: kube-system
+
+---
+
+# This binds the contiv-ksr cluster role with contiv-ksr service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: contiv-ksr
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: contiv-ksr
+subjects:
+ - kind: ServiceAccount
+ name: contiv-ksr
+ namespace: kube-system
+
+---
+
+# This installs the contiv-crd on the master node in a Kubernetes cluster.
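+# contiv-crd handles the Contiv custom resources (telemetry reports and node configurations, per
+# the contiv-crd cluster role below); its netctl-init init container additionally installs a
+# contiv-netctl wrapper on the host that execs into this pod.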
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-crd-amd64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-crd
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-crd
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: amd64
+ hostNetwork: true
+ # This grants the required permissions to contiv-crd.
+ serviceAccountName: contiv-crd
+
+ initContainers:
+ # This init container waits until etcd is started
+ - name: wait-foretcd
+ env:
+ - name: ETCDPORT
+ value: "32379"
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ image: busybox:1.29.3
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+ until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+
+ # This init container copies the contiv-netctl tool to the host.
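+ # The wrapper script written below simply runs /contiv-netctl inside the first contiv-crd pod
+ # via "kubectl exec", forwarding any command-line arguments.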
+ - name: netctl-init
+ image: iecedge/crd:v3.2.1
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ echo '#!/bin/sh
+ kubectl get pods -n kube-system | \
+ grep contiv-crd | \
+ cut -d " " -f 1 | \
+ xargs -I{} kubectl exec -n kube-system {} \
+ /contiv-netctl "$@"' \
+ > /host/usr/local/bin/contiv-netctl || true
+ chmod +x /host/usr/local/bin/contiv-netctl || true
+ volumeMounts:
+ - name: usr-local-bin
+ mountPath: /host/usr/local/bin
+
+ containers:
+ - name: contiv-crd
+ image: iecedge/crd:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ETCD_CONFIG
+ value: "/tmp/cfg/etcd.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/http/http.conf"
+ - name: HTTP_CLIENT_CONFIG
+ value: "/etc/http/http.client.conf"
+ - name: CONTIV_CRD_VALIDATE_INTERVAL
+ value: "5"
+ - name: CONTIV_CRD_VALIDATE_STATE
+ value: "SB"
+ - name: DISABLE_NETCTL_REST
+ value: "true"
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: http-cfg
+ mountPath: /etc/http
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9090
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9090
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ resources:
+ requests:
+ cpu: 100m
+
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-cfg
+ - name: usr-local-bin
+ hostPath:
+ path: /usr/local/bin
+ - name: http-cfg
+ configMap:
+ name: contiv-crd-http-cfg
+ - name: tmp-cfg
+ emptyDir: {}
+---
+# This installs the contiv-crd on the master node in a Kubernetes cluster.
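+# contiv-crd handles the Contiv custom resources (telemetry reports and node configurations, per
+# the contiv-crd cluster role below); its netctl-init init container additionally installs a
+# contiv-netctl wrapper on the host that execs into this pod.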
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: contiv-crd-arm64
+ namespace: kube-system
+ labels:
+ k8s-app: contiv-crd
+spec:
+ updateStrategy:
+ type: RollingUpdate
+ template:
+ metadata:
+ labels:
+ k8s-app: contiv-crd
+ annotations:
+ # Marks this pod as a critical add-on.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+ - key: ''
+ operator: Exists
+ effect: ''
+ # This likely isn't needed due to the above wildcard, but keep it in for now.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ beta.kubernetes.io/arch: arm64
+ hostNetwork: true
+ # This grants the required permissions to contiv-crd.
+ serviceAccountName: contiv-crd
+
+ initContainers:
+ # This init container waits until etcd is started
+ - name: wait-foretcd
+ env:
+ - name: ETCDPORT
+ value: "32379"
+ - name: HOST_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ image: arm64v8/busybox:1.29.3
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+ echo "$HOST_IP" | grep -q ':'
+ if [ "$?" -eq "0" ];
+ then
+ HOST_IP="[$HOST_IP]"
+ fi
+ sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+ until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: etcd-cfg
+ mountPath: /etc/etcd
+
+ # This init container copies the contiv-netctl tool to the host.
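+ # The wrapper script written below simply runs /contiv-netctl inside the first contiv-crd pod
+ # via "kubectl exec", forwarding any command-line arguments.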
+ - name: netctl-init
+ image: iecedge/crd-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ args:
+ - -c
+ - |
+ echo '#!/bin/sh
+ kubectl get pods -n kube-system | \
+ grep contiv-crd | \
+ cut -d " " -f 1 | \
+ xargs -I{} kubectl exec -n kube-system {} \
+ /contiv-netctl "$@"' \
+ > /host/usr/local/bin/contiv-netctl || true
+ chmod +x /host/usr/local/bin/contiv-netctl || true
+ volumeMounts:
+ - name: usr-local-bin
+ mountPath: /host/usr/local/bin
+
+ containers:
+ - name: contiv-crd
+ image: iecedge/crd-arm64:v3.2.1
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: ETCD_CONFIG
+ value: "/tmp/cfg/etcd.conf"
+ - name: HTTP_CONFIG
+ value: "/etc/http/http.conf"
+ - name: HTTP_CLIENT_CONFIG
+ value: "/etc/http/http.client.conf"
+ - name: CONTIV_CRD_VALIDATE_INTERVAL
+ value: "5"
+ - name: CONTIV_CRD_VALIDATE_STATE
+ value: "SB"
+ - name: DISABLE_NETCTL_REST
+ value: "true"
+ volumeMounts:
+ - name: tmp-cfg
+ mountPath: /tmp/cfg
+ - name: http-cfg
+ mountPath: /etc/http
+ readinessProbe:
+ httpGet:
+ path: /readiness
+ port: 9090
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 10
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9090
+ periodSeconds: 3
+ timeoutSeconds: 2
+ failureThreshold: 3
+ initialDelaySeconds: 30
+ resources:
+ requests:
+ cpu: 100m
+
+ volumes:
+ # Used to connect to contiv-etcd.
+ - name: etcd-cfg
+ configMap:
+ name: contiv-etcd-cfg
+ - name: usr-local-bin
+ hostPath:
+ path: /usr/local/bin
+ - name: http-cfg
+ configMap:
+ name: contiv-crd-http-cfg
+ - name: tmp-cfg
+ emptyDir: {}
+---
+
+# This cluster role defines a set of permissions required for contiv-crd.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+ name: contiv-crd
+ namespace: kube-system
+rules:
+ - apiGroups:
+ - apiextensions.k8s.io
+ - nodeconfig.contiv.vpp
+ - telemetry.contiv.vpp
+ resources:
+ - customresourcedefinitions
+ - telemetryreports
+ - nodeconfigs
+ verbs:
+ - "*"
+
+---
+
+# This defines a service account for contiv-crd.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: contiv-crd
+ namespace: kube-system
+
+---
+
+# This binds the contiv-crd cluster role with contiv-crd service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: contiv-crd
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: contiv-crd
+subjects:
+ - kind: ServiceAccount
+ name: contiv-crd
+ namespace: kube-system
+
+---
+
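+# HTTP configuration for contiv-crd: http.conf sets the REST endpoint the CRD pod itself serves
+# (port 9090, also used by its probes), while http.client.conf points its HTTP client at port 9999,
+# which appears to correspond to the REST endpoint exposed by the vswitch agents (see http.conf in
+# the vpp-agent-cfg config map above).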
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: contiv-crd-http-cfg
+ namespace: kube-system
+data:
+ http.conf: |
+ endpoint: "0.0.0.0:9090"
+ http.client.conf: |
+ port: 9999
+ use-https: false