Merge "Add uninstallation scripts"
author Trevor Tao <trevor.tao@arm.com>
Wed, 13 Nov 2019 06:57:16 +0000 (06:57 +0000)
committer Gerrit Code Review <gerrit@akraino.org>
Wed, 13 Nov 2019 06:57:16 +0000 (06:57 +0000)
21 files changed:
docs/release/installation/contiv-vpp_setup.rst [new file with mode: 0644]
misc/type1/macbin/defconfig-mcbin-edge
src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/install-ovn-k8s.sh [new file with mode: 0755]
src/foundation/scripts/cni/ovn-kubernetes/push-manifest.sh [new file with mode: 0755]
src/foundation/scripts/cni/ovn-kubernetes/templates/cleanup-ovn-cni.conf.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/templates/ovn-setup.yaml.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db-vip.yaml.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db.yaml.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-master.yaml.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-node.yaml.j2 [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/uninstall-ovn-k8s.sh [new file with mode: 0755]
src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db-vip.yaml [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db.yaml [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-master.yaml [new file with mode: 0644]
src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-node.yaml [new file with mode: 0644]
src/foundation/scripts/config
src/foundation/scripts/k8s_master.sh
src/foundation/scripts/setup-cni.sh
src/foundation/scripts/startup.sh
src/use_cases/seba_on_arm/src_repo/seba_charts

diff --git a/docs/release/installation/contiv-vpp_setup.rst b/docs/release/installation/contiv-vpp_setup.rst
new file mode 100644 (file)
index 0000000..f9ee4a2
--- /dev/null
@@ -0,0 +1,257 @@
+Contiv-VPP Setup
+================
+
+This document describes how to deploy the IEC platform with Contiv-VPP
+networking on bare-metal hosts. The automatic deployment script provided
+by IEC uses the Calico CNI by default, so to enable the Contiv-VPP
+network solution for Kubernetes you need to make some minor
+modifications. At present, IEC supports only multi-NIC deployment and
+does not yet support configuring STN. In addition, the deployment
+methods for IEC type1 and type2 differ slightly, and are introduced in
+separate chapters.
+
+Setting up for IEC type2
+------------------------
+
+IEC type2 is deployed on large, powerful commercial servers. The main
+installation steps are as follows:
+
+Setting up DPDK
+~~~~~~~~~~~~~~~
+
+All ports that are to be used by a DPDK application must be bound to
+the uio_pci_generic, igb_uio or vfio-pci module before the application
+is run; for more details, please refer to the `DPDK DOC`_.
+
+The following guide uses vfio-pci. Load the kernel module:
+
+::
+
+    $ sudo modprobe vfio-pci
+
+Verify that the vfio-pci driver has loaded successfully:
+
+::
+
+    $ lsmod |grep pci
+    vfio_pci               49152  0
+    vfio_virqfd            16384  1 vfio_pci
+    vfio_iommu_type1       24576  0
+    vfio                   40960  2 vfio_iommu_type1,vfio_pci
+
+Determine the network adapter that VPP will use:
+
+::
+
+    $ sudo lshw -class network -businfo
+    Bus info          Device       Class      Description
+    ================================================
+    pci@0000:89:00.0  enp137s0f0   network    Ethernet Controller X710 for 10GbE SFP+
+    pci@0000:89:00.1  enp137s0f1   network    Ethernet Controller X710 for 10GbE SFP+
+
+In this example, enp137s0f1 is used by VPP and bound to the vfio-pci module:
+
+::
+
+    $ sudo ~/dpdk/usertools/dpdk-devbind.py --bind=vfio-pci enp137s0f1
+
+The dpdk-devbind.py script lives in the `DPDK`_ repository.
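+
+To confirm the binding took effect, the same script can report the
+current driver assignments (a quick check, assuming the DPDK repo is
+cloned to ~/dpdk as above):
+
+::
+
+    $ sudo ~/dpdk/usertools/dpdk-devbind.py --status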
+
+Automation deployment
+~~~~~~~~~~~~~~~~~~~~~
+
+At a minimum, three nodes are needed: a jumpserver, a master node and a
+worker node. The two kinds of nodes are configured by different scripts.
+
+-  Modify default network solution
+
+.. code:: diff
+
+    --- a/src/foundation/scripts/config
+    +++ b/src/foundation/scripts/config
+    @@ -30,7 +30,7 @@ K8S_WORKER_GROUP=(
+     CLUSTER_IP=172.16.1.136 # Align with the value in our K8s setup script
+     POD_NETWORK_CIDR=192.168.0.0/16
+     #IEC support three kinds network solution for Kubernetes: calico,flannel,contivpp
+    -CNI_TYPE=calico
+    +CNI_TYPE=contivpp
+     #kubernetes-cni version 0.7.5/ 0.6.0
+     CNI_VERSION=0.6.0
+
+-  Master node configuration
+
+Initialize DEV_NAME for the master node by instantiating the fourth
+argument of the setup-cni.sh script:
+
+.. code:: diff
+
+    --- a/src/foundation/scripts/startup.sh
+    +++ b/src/foundation/scripts/startup.sh
+    @@ -99,7 +99,7 @@ deploy_k8s () {
+       #Deploy etcd & CNI from master node
+    -  SETUP_CNI="cd iec/src/foundation/scripts && source setup-cni.sh $CLUSTER_IP $POD_NETWORK_CIDR $CNI_TYPE"
+    +  SETUP_CNI="cd iec/src/foundation/scripts && source setup-cni.sh $CLUSTER_IP $POD_NETWORK_CIDR $CNI_TYPE enp137s0f1"
+       sshpass -p ${K8S_MASTERPW} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_CNI}
+       SETUP_HELM="cd iec/src/foundation/scripts && source helm.sh"
+
+The modified result is as follows:
+
+.. code:: diff
+
+    --- a/src/foundation/scripts/setup-cni.sh
+    +++ b/src/foundation/scripts/setup-cni.sh
+    @@ -11,7 +11,7 @@ fi
+     CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
+     POD_NETWORK_CIDR=${2:-192.168.0.0/16}
+     CNI_TYPE=${3:-calico}
+    -DEV_NAME=${4:-}
+    +DEV_NAME=${4:-enp137s0f1}
+
+-  Worker node configuration
+
+As with the master node, the worker node needs DPDK set up and its
+network adapter determined. Initialize DEV_NAME for the worker node:
+
+.. code:: diff
+
+    --- a/src/foundation/scripts/config
+    +++ b/src/foundation/scripts/config
+    @@ -42,4 +42,4 @@ KUBE_VERSION=1.13.0
+     #  [10.169.40.106]="enp137s0f0"
+     #  )
+     declare -A DEV_NAME
+    -DEV_NAME=()
+    +DEV_NAME=([10.169.40.106]="enp137s0f0")
+
+DEV_NAME is an associative array listing the network interface device
+names used by contivpp, keyed by the IP addresses of K8S_WORKER_GROUP.
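+
+For example, with two workers, each entry maps a worker IP to its VPP
+device (the second address and interface below are hypothetical, shown
+only to illustrate the syntax):
+
+::
+
+    declare -A DEV_NAME
+    DEV_NAME=([10.169.40.106]="enp137s0f0" [10.169.40.107]="enp137s0f1")
+    # Look up the device name for a given worker IP:
+    echo "${DEV_NAME[10.169.40.106]}"   # prints enp137s0f0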
+
+-  Launch setup
+
+Simply start the installation script startup.sh on the jumpserver:
+
+::
+
+    jenkins@jumpserver:~/iec/src/foundation/scripts$ ./startup.sh
+
+For more details, refer to `installation.instruction.rst`_.
+
+Setting up for IEC type1
+------------------------
+
+IEC type1 devices are suitable for low-power deployments. Currently we
+choose the `MACCHIATObin`_ board as the main hardware platform.
+
+Install MUSDK
+~~~~~~~~~~~~~
+
+The Marvell User-Space SDK (`MUSDK`_) is a light-weight user-space I/O
+driver for Marvell's embedded networking SoCs; for more details, please
+refer to the `VPP Marvell plugin`_.
+
+Automation deployment
+~~~~~~~~~~~~~~~~~~~~~
+
+-  Modify the default YAML
+
+.. code:: diff
+
+    diff --git a/src/foundation/scripts/setup-cni.sh b/src/foundation/scripts/setup-cni.sh
+    index d466831..6993006 100755
+    --- a/src/foundation/scripts/setup-cni.sh
+    +++ b/src/foundation/scripts/setup-cni.sh
+    @@ -43,7 +43,7 @@ install_contivpp(){
+
+       # Install contivpp CNI
+       sed -i "s@10.1.0.0/16@${POD_NETWORK_CIDR}@" "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp.yaml"
+    -  kubectl apply -f "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp.yaml"
+    +  kubectl apply -f "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp-macbin.yaml"
+     }
+
+-  Configuration
+
+To configure a PP2 interface, a mainVppInterface with the mv-ppio-
+prefix must be configured in the nodeConfig section of the deployment
+YAML. mv-ppio-X/Y is the VPP interface name, where X is the PP2 device
+ID and Y is the PPIO ID. The interface needs to be assigned to MUSDK in
+the FDT configuration, and the Linux interface state must be up.
+Example configuration:
+
+::
+
+    ~/iec/src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml
+        nodeConfig:
+        - nodeName: net-arm-mcbin-iec
+          mainVppInterface:
+            interfaceName: mv-ppio-0/0
+        - nodeName: net-arm-mcbin-iec-1
+          mainVppInterface:
+            interfaceName: mv-ppio-0/0
+
+PP2 has no dependency on DPDK or the DPDK plugin, but it can work with
+the DPDK plugin either enabled or disabled. Performance is observed to
+be around 30% better when the DPDK plugin is disabled. The DPDK plugin
+can be disabled by adding the following config to contiv-vswitch.conf
+(a verification sketch follows the config block).
+
+.. code:: diff
+
+    --- a/src/foundation/scripts/cni/contivpp/contiv-vswitch.conf
+    +++ b/src/foundation/scripts/cni/contivpp/contiv-vswitch.conf
+    @@ -24,3 +24,7 @@ socksvr {
+     statseg {
+        default
+     }
+    +plugins {
+    +        plugin vpp_plugin.so { enable }
+    +        plugin dpdk_plugin.so { disable }
+    +}
+
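+Once the vswitch restarts with this config, the loaded plugin set can
+be checked from the VPP CLI (a quick sanity check: ``show plugins``
+lists the plugins VPP actually loaded, so dpdk_plugin.so should no
+longer appear):
+
+::
+
+    $ vppctl show plugins | grep dpdk || echo "DPDK plugin disabled"
+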
+-  Modify scripts
+
+As with IEC type2, it is necessary to modify the relevant scripts
+(config, startup.sh, setup-cni.sh) to support automatic deployment.
+
+-  Launch setup
+
+Simply start the installation script startup.sh on jumpserver:
+
+::
+
+    jenkins@jumpserver:~/iec/src/foundation/scripts$ ./startup.sh
+
+For more details, refer to `installation.instruction.rst`_.
+
+Deployment Verification
+-----------------------
+
+Invoke ./src/foundation/scripts/nginx.sh to install nginx and test
+whether the CNI environment is ready.
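+
+Readiness can also be confirmed directly with kubectl (standard
+commands; the exact pod names depend on the CNI in use):
+
+::
+
+    $ kubectl get nodes                        # all nodes should report Ready
+    $ kubectl get pods -n kube-system -o wide  # contiv-* pods should be Running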
+
+Uninstalling Contiv-VPP
+-----------------------
+
+To uninstall the network plugin for type2:
+
+::
+
+    kubectl delete -f  ./iec/src/foundation/scripts/cni/contivpp/contiv-vpp.yaml
+
+To uninstall the network plugin for type1:
+
+::
+
+    kubectl delete -f  ./iec/src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml
+
+To remove the persisted configuration, clean up the Bolt and ETCD
+storage:
+
+::
+
+    rm -rf /var/etcd/contiv-data
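+
+Per the hostPath mounts in contiv-vpp-macbin.yaml, Bolt data is
+persisted under /var/bolt on each node, so a fuller cleanup (assuming
+the default paths) would be:
+
+::
+
+    rm -rf /var/etcd/contiv-data   # ETCD data on the master
+    rm -rf /var/bolt               # Bolt data on every node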
+
+.. All links go below this line
+.. _`DPDK DOC`: https://doc.dpdk.org/guides/linux_gsg/linux_drivers.html#binding-and-unbinding-network-ports-to-from-the-kernel-modules
+.. _`DPDK`: https://github.com/DPDK/dpdk/blob/master/usertools/dpdk-devbind.py
+.. _`installation.instruction.rst`: ./installation.instruction.rst
+.. _`MACCHIATObin`: http://macchiatobin.net
+.. _`MUSDK`: https://github.com/MarvellEmbeddedProcessors/musdk-marvell
+.. _`VPP Marvell plugin`: https://github.com/FDio/vpp/blob/master/src/plugins/marvell/README.md
index f1a26d6..2d1646e 100644 (file)
@@ -226,6 +226,8 @@ CONFIG_VLAN_8021Q_GVRP=y
 CONFIG_VLAN_8021Q_MVRP=y
 CONFIG_NET_SCHED=y
 CONFIG_NET_CLS_CGROUP=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
 CONFIG_NETLINK_DIAG=y
 CONFIG_MPLS=y
 CONFIG_NET_MPLS_GSO=y
@@ -285,6 +287,7 @@ CONFIG_DUMMY=y
 CONFIG_MACVLAN=y
 CONFIG_MACVTAP=y
 CONFIG_IPVLAN=y
+CONFIG_IPVTAP=y
 CONFIG_VXLAN=y
 CONFIG_TUN=y
 CONFIG_VETH=y
@@ -548,6 +551,8 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=y
+CONFIG_VHOST_NET=y
+CONFIG_VHOST_VSOCK=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
diff --git a/src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml b/src/foundation/scripts/cni/contivpp/contiv-vpp-macbin.yaml
new file mode 100644 (file)
index 0000000..302d9a6
--- /dev/null
@@ -0,0 +1,986 @@
+---
+# Source: contiv-vpp/templates/vpp.yaml
+# Contiv-VPP deployment YAML file. This deploys Contiv VPP networking on a Kubernetes cluster.
+# The deployment consists of the following components:
+#   - contiv-etcd - deployed on k8s master
+#   - contiv-vswitch - deployed on each k8s node
+#   - contiv-ksr - deployed on k8s master
+
+###########################################################
+#  Configuration
+###########################################################
+
+# This config map contains contiv-agent configuration. The most important part is the ipamConfig,
+# which may be updated in case the default IPAM settings do not match your needs.
+# nodeConfig may be used in case your nodes have more than 1 VPP interface. In that case, one
+# of them needs to be marked as the main inter-node interface, and the rest of them can be
+# configured with any IP addresses (the IPs cannot conflict with the main IPAM config).
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-agent-cfg
+  namespace: kube-system
+data:
+  contiv.conf: |-
+    nodeToNodeTransport: vxlan
+    useSRv6ForServices: false
+    useTAPInterfaces: true
+    tapInterfaceVersion: 2
+    tapv2RxRingSize: 256
+    tapv2TxRingSize: 256
+    enableGSO: false
+    tcpChecksumOffloadDisabled: true
+    STNVersion: 2
+    natExternalTraffic: true
+    mtuSize: 1450
+    scanIPNeighbors: true
+    ipNeighborScanInterval: 1
+    ipNeighborStaleThreshold: 4
+    enablePacketTrace: false
+    routeServiceCIDRToVPP: false
+    crdNodeConfigurationDisabled: true
+    ipamConfig:
+      nodeInterconnectDHCP: false
+      nodeInterconnectCIDR: 192.168.16.0/24
+      podSubnetCIDR: 10.1.0.0/16
+      podSubnetOneNodePrefixLen: 24
+      vppHostSubnetCIDR: 172.30.0.0/16
+      vppHostSubnetOneNodePrefixLen: 24
+      vxlanCIDR: 192.168.30.0/24
+      srv6:
+        servicePolicyBSIDSubnetCIDR: 8fff::/16
+        servicePodLocalSIDSubnetCIDR: 9300::/16
+        serviceHostLocalSIDSubnetCIDR: 9300::/16
+        serviceNodeLocalSIDSubnetCIDR: 9000::/16
+        nodeToNodePodLocalSIDSubnetCIDR: 9501::/16
+        nodeToNodeHostLocalSIDSubnetCIDR: 9500::/16
+        nodeToNodePodPolicySIDSubnetCIDR: 8501::/16
+        nodeToNodeHostPolicySIDSubnetCIDR: 8500::/16
+    nodeConfig:
+    - nodeName: net-arm-mcbin-iec
+      mainVppInterface:
+        interfaceName: mv-ppio-0/0
+    - nodeName: net-arm-mcbin-iec-1
+      mainVppInterface:
+        interfaceName: mv-ppio-0/0
+  controller.conf: |
+    enableRetry: true
+    delayRetry: 1000000000
+    maxRetryAttempts: 3
+    enableExpBackoffRetry: true
+    delayLocalResync: 5000000000
+    startupResyncDeadline: 30000000000
+    enablePeriodicHealing: false
+    periodicHealingInterval: 30000000000
+    delayAfterErrorHealing: 5000000000
+    remoteDBProbingInterval: 3000000000
+    recordEventHistory: true
+    eventHistoryAgeLimit: 60
+    permanentlyRecordedInitPeriod: 10
+  service.conf: |
+    cleanupIdleNATSessions: true
+    tcpNATSessionTimeout: 180
+    otherNATSessionTimeout: 5
+    serviceLocalEndpointWeight: 1
+    disableNATVirtualReassembly: false
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: vpp-agent-cfg
+  namespace: kube-system
+data:
+  govpp.conf: |
+    health-check-probe-interval: 3000000000
+    health-check-reply-timeout: 500000000
+    health-check-threshold: 3
+    reply-timeout: 3000000000
+  logs.conf: |
+    default-level: debug
+    loggers:
+      - name: statscollector
+        level: info
+      - name: vpp.if-state
+        level: info
+      - name: linux.arp-conf
+        level: info
+      - name: vpp-rest
+        level: info
+  grpc.conf: |
+    network: unix
+    endpoint: /var/run/contiv/cni.sock
+    force-socket-removal: true
+    permission: 700
+  http.conf: |
+    endpoint: "0.0.0.0:9999"
+  bolt.conf: |
+    db-path: /var/bolt/bolt.db
+    file-mode: 432
+    lock-timeout: 0
+  telemetry.conf: |
+    polling-interval: 30000000000
+    disabled: true
+  linux-ifplugin.conf: |
+    dump-go-routines-count: 5
+  linux-l3plugin.conf: |
+    dump-go-routines-count: 5
+  kvscheduler.conf: |
+    record-transaction-history: true
+    transaction-history-age-limit: 60
+    permanently-recorded-init-period: 10
+
+---
+
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: contiv-cni-cfg
+  namespace: kube-system
+data:
+  # The CNI network configuration to install on each node. The special
+  # values in this config will be automatically populated.
+  10-contiv-vpp.conflist: |-
+    {
+      "name": "k8s-pod-network",
+      "cniVersion": "0.3.1",
+      "plugins": [
+        {
+          "type": "contiv-cni",
+          "grpcServer": "/var/run/contiv/cni.sock",
+          "logFile": "/var/run/contiv/cni.log"
+        },
+        {
+          "type": "portmap",
+          "capabilities": {
+              "portMappings": true
+          },
+          "externalSetMarkChain": "KUBE-MARK-MASQ"
+        }
+      ]
+    }
+---
+
+###########################################################
+#
+# !!! DO NOT EDIT THINGS BELOW THIS LINE !!!
+#
+###########################################################
+
+
+###########################################################
+#  Components and other resources
+###########################################################
+
+# This installs the contiv-etcd (ETCD server to be used by Contiv) on the master node in a Kubernetes cluster.
+# In order to dump the content of ETCD, you can use the kubectl exec command similar to this:
+#   kubectl exec contiv-etcd-cxqhr -n kube-system etcdctl -- get --endpoints=[127.0.0.1:12379] --prefix="true" ""
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+  name: contiv-etcd
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-etcd
+spec:
+  serviceName: contiv-etcd
+  selector:
+    matchLabels:
+      k8s-app: contiv-etcd
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-etcd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      hostNetwork: true
+
+      containers:
+        - name: contiv-etcd
+          image: quay.io/coreos/etcd:v3.3.11-arm64
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: CONTIV_ETCD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: ETCDCTL_API
+              value: "3"
+            - name: ETCD_UNSUPPORTED_ARCH
+              value: "arm64"
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - /usr/local/bin/etcd --name=contiv-etcd --data-dir=/var/etcd/contiv-data
+              --advertise-client-urls=http://0.0.0.0:12379 --listen-client-urls=http://0.0.0.0:12379
+              --listen-peer-urls=http://0.0.0.0:12380
+          volumeMounts:
+            - name: var-etcd
+              mountPath: /var/etcd/
+          livenessProbe:
+            exec:
+              command:
+                - /bin/sh
+                - -c
+                - |
+                  echo "$HOST_IP" | grep -q ':'
+                  if [ "$?" -eq "0" ];
+                  then
+                     HOST_IP="[$HOST_IP]"
+                  fi
+                  etcdctl get --endpoints=$HOST_IP:32379 /
+            periodSeconds: 3
+            initialDelaySeconds: 20
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        - name: var-etcd
+          hostPath:
+            path: /var/etcd
+
+---
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: contiv-etcd
+  namespace: kube-system
+spec:
+  type: NodePort
+  # Match contiv-etcd DaemonSet.
+  selector:
+    k8s-app: contiv-etcd
+  ports:
+    - port: 12379
+      nodePort: 32379
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-ksr-http-cfg
+  namespace: kube-system
+data:
+  http.conf: |
+    endpoint: "0.0.0.0:9191"
+
+---
+# This config map contains ETCD configuration for connecting to the contiv-etcd defined above.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-etcd-cfg
+  namespace: kube-system
+data:
+  etcd.conf: |
+    dial-timeout: 10000000000
+    allow-delayed-start: true
+    insecure-transport: true
+    endpoints:
+      - "__HOST_IP__:32379"
+
+---
+
+# This config map contains ETCD configuration for connecting to the contiv-etcd defined above with auto compact.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-etcd-withcompact-cfg
+  namespace: kube-system
+data:
+  etcd.conf: |
+    insecure-transport: true
+    dial-timeout: 10000000000
+    auto-compact: 600000000000
+    allow-delayed-start: true
+    reconnect-interval: 2000000000
+    endpoints:
+      - "__HOST_IP__:32379"
+
+---
+
+# This installs contiv-vswitch on each master and worker node in a Kubernetes cluster.
+# It consists of the following containers:
+#   - contiv-vswitch container: contains VPP and its management agent
+#   - contiv-cni container: installs CNI on the host
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-vswitch
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-vswitch
+spec:
+  selector:
+    matchLabels:
+      k8s-app: contiv-vswitch
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-vswitch
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      hostNetwork: true
+      hostPID: true
+
+      # Init containers are executed before regular containers, must finish successfully before regular
+      # ones are started.
+      initContainers:
+        # This container installs the Contiv CNI binaries and CNI network config file on each node.
+        - name: contiv-cni
+          image: iecedge/cni-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: SLEEP
+              value: "false"
+          volumeMounts:
+            - mountPath: /opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /etc/cni/net.d
+              name: cni-net-dir
+            - mountPath: /cni/cfg
+              name: contiv-cni-cfg
+            - mountPath: /var/run/contiv
+              name: contiv-run
+
+        # This init container extracts/copies default VPP config to the host and initializes VPP core dumps.
+        - name: vpp-init
+          image: iecedge/vswitch-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              set -eu
+              chmod 700 /run/vpp
+              rm -rf /dev/shm/db /dev/shm/global_vm /dev/shm/vpe-api
+              if [ ! -e /host/etc/vpp/contiv-vswitch.conf ]; then
+                  cp /etc/vpp/contiv-vswitch.conf /host/etc/vpp/
+              fi
+              if [ ! -d /var/run/contiv ]; then
+                  mkdir /var/run/contiv
+              fi
+              chmod 700 /var/run/contiv
+              rm -f /var/run/contiv/cni.sock
+              if ip link show vpp1 >/dev/null 2>&1; then
+                   ip link del vpp1
+              fi
+              cp -f /usr/local/bin/vppctl /host/usr/local/bin/vppctl
+              sysctl -w debug.exception-trace=1
+              sysctl -w kernel.core_pattern="/var/contiv/dumps/%e-%t"
+              ulimit -c unlimited
+              echo 2 > /proc/sys/fs/suid_dumpable
+              # replace localhost IP by node IP since node port doesn't work
+              # on localhost IP in a certain scenario
+              cp /etc/etcd/etcd.conf /tmp/etcd.conf
+              set +e
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/etcd.conf
+          resources: {}
+          securityContext:
+            privileged: true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+            - name: vpp-cfg
+              mountPath: /host/etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: tmp
+              mountPath: /tmp
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+
+      containers:
+        # Runs contiv-vswitch container on each Kubernetes node.
+        # It contains the vSwitch VPP and its management agent.
+        - name: contiv-vswitch
+          image: iecedge/vswitch-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          securityContext:
+            privileged: true
+          ports:
+            # readiness + liveness probe
+            - containerPort: 9999
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 15
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9999
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 60
+          env:
+            - name: MICROSERVICE_LABEL
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: GOVPPMUX_NOSOCK
+              value: "1"
+            - name: CONTIV_CONFIG
+              value: "/etc/contiv/contiv.conf"
+            - name: CONTROLLER_CONFIG
+              value: "/etc/contiv/controller.conf"
+            - name: SERVICE_CONFIG
+              value: "/etc/contiv/service.conf"
+            - name: ETCD_CONFIG
+              value: "/tmp/etcd.conf"
+            - name: BOLT_CONFIG
+              value: "/etc/vpp-agent/bolt.conf"
+            # Uncomment to log graph traversal (very verbose):
+            # - name: KVSCHED_LOG_GRAPH_WALK
+            #   value: "true"
+            # Uncomment to verify effect of every transaction:
+            # - name: KVSCHED_VERIFY_MODE
+            #   value: "true"
+            - name: TELEMETRY_CONFIG
+              value: "/etc/vpp-agent/telemetry.conf"
+            - name: GOVPP_CONFIG
+              value: "/etc/vpp-agent/govpp.conf"
+            - name: LOGS_CONFIG
+              value: "/etc/vpp-agent/logs.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/vpp-agent/http.conf"
+            - name: GRPC_CONFIG
+              value: "/etc/vpp-agent/grpc.conf"
+            - name: LINUX_IFPLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-ifplugin.conf"
+            - name: LINUX_L3PLUGIN_CONFIG
+              value: "/etc/vpp-agent/linux-l3plugin.conf"
+            - name: KVSCHEDULER_CONFIG
+              value: "/etc/vpp-agent/kvscheduler.conf"
+            - name: DISABLE_INTERFACE_STATS
+              value: "y"
+          volumeMounts:
+            - name: var-bolt
+              mountPath: /var/bolt
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+            - name: vpp-cfg
+              mountPath: /etc/vpp
+            - name: shm
+              mountPath: /dev/shm
+            - name: dev
+              mountPath: /dev
+            - name: sys-bus-pci
+              mountPath: /sys/bus/pci
+            - name: vpp-run
+              mountPath: /run/vpp
+            - name: contiv-run
+              mountPath: /var/run/contiv
+            - name: contiv-agent-cfg
+              mountPath: /etc/contiv
+            - name: vpp-agent-cfg
+              mountPath: /etc/vpp-agent
+            - name: tmp
+              mountPath: /tmp
+            - name: core-dumps
+              mountPath: /var/contiv/dumps
+            - name: docker-socket
+              mountPath: /var/run/docker.sock
+            - name: kubelet-api
+              mountPath: /var/lib/kubelet
+          resources:
+            limits:
+              hugepages-2Mi: 512Mi
+              memory: 512Mi
+            requests:
+              cpu: 250m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+        # VPP startup config folder.
+        - name: vpp-cfg
+          hostPath:
+            path: /etc/vpp
+        # To install vppctl.
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        # /dev mount is required for DPDK-managed NICs on VPP (/dev/uio0) and for shared memory communication with VPP
+        # (/dev/shm)
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: shm
+          hostPath:
+            path: /dev/shm
+        # /sys/bus/pci is required for binding PCI devices to specific drivers
+        - name: sys-bus-pci
+          hostPath:
+            path: /sys/bus/pci
+        # For CLI unix socket.
+        - name: vpp-run
+          hostPath:
+            path: /run/vpp
+        # For CNI / STN unix domain socket
+        - name: contiv-run
+          hostPath:
+            path: /var/run/contiv
+        # Used to configure contiv agent.
+        - name: contiv-agent-cfg
+          configMap:
+            name: contiv-agent-cfg
+        # Used to configure vpp agent.
+        - name: vpp-agent-cfg
+          configMap:
+            name: vpp-agent-cfg
+        # Used for vswitch core dumps
+        - name: core-dumps
+          hostPath:
+            path: /var/contiv/dumps
+        # /tmp in the vswitch container (needs to be persistent between container restarts to obtain post-mortem files)
+        - name: tmp
+          emptyDir:
+            medium: Memory
+        # persisted bolt data
+        - name: var-bolt
+          hostPath:
+            path: /var/bolt
+        - name: docker-socket
+          hostPath:
+            path: /var/run/docker.sock
+        # CNI config
+        - name: contiv-cni-cfg
+          configMap:
+            name: contiv-cni-cfg
+        # kubelet api dir
+        - name: kubelet-api
+          hostPath:
+            path: /var/lib/kubelet
+
+---
+
+# This installs the contiv-ksr (Kubernetes State Reflector) on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-ksr
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-ksr
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-ksr
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      hostNetwork: true
+      # This grants the required permissions to contiv-ksr.
+      serviceAccountName: contiv-ksr
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: arm64v8/busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+
+      containers:
+        - name: contiv-ksr
+          image: iecedge/ksr-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9191
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-withcompact-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+        - name: http-cfg
+          configMap:
+            name: contiv-ksr-http-cfg
+
+---
+
+# This cluster role defines a set of permissions required for contiv-ksr.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: contiv-ksr
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - ""
+      - extensions
+    resources:
+      - pods
+      - namespaces
+      - networkpolicies
+      - services
+      - endpoints
+      - nodes
+    verbs:
+      - watch
+      - list
+
+---
+
+# This defines a service account for contiv-ksr.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: contiv-ksr
+  namespace: kube-system
+
+---
+
+# This binds the contiv-ksr cluster role with contiv-ksr service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: contiv-ksr
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: contiv-ksr
+subjects:
+  - kind: ServiceAccount
+    name: contiv-ksr
+    namespace: kube-system
+
+---
+
+# This installs the contiv-crd on the master node in a Kubernetes cluster.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: contiv-crd
+  namespace: kube-system
+  labels:
+    k8s-app: contiv-crd
+spec:
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        k8s-app: contiv-crd
+      annotations:
+        # Marks this pod as a critical add-on.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      tolerations:
+        # We need this to schedule on the master no matter what else is going on, so tolerate everything.
+        - key: ''
+          operator: Exists
+          effect: ''
+        # This likely isn't needed due to the above wildcard, but keep it in for now.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Only run this pod on the master.
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+      hostNetwork: true
+      # This grants the required permissions to contiv-crd.
+      serviceAccountName: contiv-crd
+
+      initContainers:
+        # This init container waits until etcd is started
+        - name: wait-foretcd
+          env:
+            - name: ETCDPORT
+              value: "32379"
+            - name: HOST_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+          image: arm64v8/busybox:1.29.3
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              cp /etc/etcd/etcd.conf /tmp/cfg/etcd.conf
+              echo "$HOST_IP" | grep -q ':'
+              if [ "$?" -eq "0" ];
+              then
+                 HOST_IP="[$HOST_IP]"
+              fi
+              sed -i "s/__HOST_IP__/$HOST_IP/g" /tmp/cfg/etcd.conf
+              until nc -w 2 $HOST_IP:$ETCDPORT; do echo waiting for etcd; sleep 2; done;
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: etcd-cfg
+              mountPath: /etc/etcd
+
+        # This init container copies contiv-netctl tool to the host.
+        - name: netctl-init
+          image: iecedge/crd-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          command:
+            - /bin/sh
+          args:
+            - -c
+            - |
+              echo '#!/bin/sh
+              kubectl get pods -n kube-system | \
+                grep contiv-crd | \
+                cut -d " " -f 1 | \
+                xargs -I{} kubectl exec -n kube-system {} \
+                /contiv-netctl "$@"' \
+              > /host/usr/local/bin/contiv-netctl || true
+              chmod +x /host/usr/local/bin/contiv-netctl || true
+          volumeMounts:
+            - name: usr-local-bin
+              mountPath: /host/usr/local/bin
+
+      containers:
+        - name: contiv-crd
+          image: iecedge/crd-arm64:v3.2.1-macbin
+          imagePullPolicy: IfNotPresent
+          env:
+            - name: ETCD_CONFIG
+              value: "/tmp/cfg/etcd.conf"
+            - name: HTTP_CONFIG
+              value: "/etc/http/http.conf"
+            - name: HTTP_CLIENT_CONFIG
+              value: "/etc/http/http.client.conf"
+            - name: CONTIV_CRD_VALIDATE_INTERVAL
+              value: "5"
+            - name: CONTIV_CRD_VALIDATE_STATE
+              value: "SB"
+            - name: DISABLE_NETCTL_REST
+              value: "true"
+          volumeMounts:
+            - name: tmp-cfg
+              mountPath: /tmp/cfg
+            - name: http-cfg
+              mountPath: /etc/http
+          readinessProbe:
+            httpGet:
+              path: /readiness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 10
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9090
+            periodSeconds: 3
+            timeoutSeconds: 2
+            failureThreshold: 3
+            initialDelaySeconds: 30
+          resources:
+            requests:
+              cpu: 100m
+
+      volumes:
+        # Used to connect to contiv-etcd.
+        - name: etcd-cfg
+          configMap:
+            name: contiv-etcd-cfg
+        - name: usr-local-bin
+          hostPath:
+            path: /usr/local/bin
+        - name: http-cfg
+          configMap:
+            name: contiv-crd-http-cfg
+        - name: tmp-cfg
+          emptyDir: {}
+---
+
+# This cluster role defines a set of permissions required for contiv-crd.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: contiv-crd
+  namespace: kube-system
+rules:
+  - apiGroups:
+      - apiextensions.k8s.io
+      - nodeconfig.contiv.vpp
+      - telemetry.contiv.vpp
+      - contivpp.io
+    resources:
+      - customresourcedefinitions
+      - telemetryreports
+      - nodeconfigs
+      - customnetworks
+      - servicefunctionchains
+    verbs:
+      - "*"
+
+---
+
+# This defines a service account for contiv-crd.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: contiv-crd
+  namespace: kube-system
+
+---
+
+# This binds the contiv-crd cluster role with contiv-crd service account.
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: contiv-crd
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: contiv-crd
+subjects:
+  - kind: ServiceAccount
+    name: contiv-crd
+    namespace: kube-system
+
+---
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: contiv-crd-http-cfg
+  namespace: kube-system
+data:
+  http.conf: |
+    endpoint: "0.0.0.0:9090"
+  http.client.conf: |
+    port: 9999
+    use-https: false
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/install-ovn-k8s.sh b/src/foundation/scripts/cni/ovn-kubernetes/install-ovn-k8s.sh
new file mode 100755 (executable)
index 0000000..b858c96
--- /dev/null
@@ -0,0 +1,46 @@
+#!/bin/bash -ex
+# shellcheck disable=SC2016
+
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+function wait_for {
+  # Execute in a subshell to prevent local variable override during recursion
+  (
+    local total_attempts=$1; shift
+    local cmdstr=$*
+    local sleep_time=2
+    echo -e "\n[wait_for] Waiting for cmd to return success: ${cmdstr}"
+    # shellcheck disable=SC2034
+    for attempt in $(seq "${total_attempts}"); do
+      echo "[wait_for] Attempt ${attempt}/${total_attempts%.*} for: ${cmdstr}"
+      # shellcheck disable=SC2015
+      eval "${cmdstr}" && echo "[wait_for] OK: ${cmdstr}" && return 0 || true
+      sleep "${sleep_time}"
+    done
+    echo "[wait_for] ERROR: Failed after max attempts: ${cmdstr}"
+    return 1
+  )
+}
+
+# Create OVN namespace, service accounts, ovnkube-db headless service, configmap, and policies
+kubectl create -f ${SCRIPTS_DIR}/yaml/ovn-setup.yaml
+wait_for 5 'test $(kubectl get svc -n ovn-kubernetes | grep ovnkube-db -c ) -eq 1'
+
+
+# Run ovnkube-db daemonset.
+kubectl create -f ${SCRIPTS_DIR}/yaml/ovnkube-db.yaml
+wait_for 60 'test $(kubectl get pods -n ovn-kubernetes | grep -e "ovnkube-db" | grep "Running" -c) -eq 1'
+
+
+# Run ovnkube-master daemonset.
+kubectl create -f ${SCRIPTS_DIR}/yaml/ovnkube-master.yaml
+wait_for 60 'test $(kubectl get pods -n ovn-kubernetes | grep -e "ovnkube-master" | grep "Running" -c) -eq 1'
+
+
+# Run the ovnkube daemonset for nodes; there may be more than 1 ovnkube-node pod,
+# since 1 ovnkube-node pod runs on each K8s node
+kubectl create -f ${SCRIPTS_DIR}/yaml/ovnkube-node.yaml
+wait_for 60 'test $(kubectl get pods -n ovn-kubernetes | grep -e "ovnkube-node" | grep "Running" -c) -ge 1'
+
+
+#kubectl get pods -n ovn-kubernetes
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/push-manifest.sh b/src/foundation/scripts/cni/ovn-kubernetes/push-manifest.sh
new file mode 100755 (executable)
index 0000000..f4e3c80
--- /dev/null
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+#Supported platforms of multi-arch images are: amd64 arm64
+LINUX_ARCH=(amd64 arm64)
+PLATFORMS=linux/${LINUX_ARCH[0]}
+for i in $(seq 1 $((${#LINUX_ARCH[@]}-1)))
+do
+    PLATFORMS=$PLATFORMS,linux/${LINUX_ARCH[$i]}
+done
+
+IMAGES_OVN=("ovn-daemonset")
+#IMAGES_OVN=("ovn-daemonset" "ovn-daemonset-u")
+BRANCH_TAG=latest
+
+#Before push, 'docker login' is needed
+push_multi_arch(){
+
+       if [ ! -f "./manifest-tool" ]
+       then
+                sudo apt-get install -y jq
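+                # Note: BUILDARCH (e.g. amd64 or arm64) is assumed to be set in the caller's environment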
+                wget https://github.com/estesp/manifest-tool/releases/download/v0.9.0/manifest-tool-linux-${BUILDARCH} \
+                -O manifest-tool && \
+                chmod +x ./manifest-tool
+       fi
+
+       for IMAGE in "${IMAGES_OVN[@]}"
+       do
+         echo "multi arch image: ""iecedge/${IMAGE}"
+         ./manifest-tool push from-args --platforms ${PLATFORMS} --template iecedge/${IMAGE}-ARCH:${BRANCH_TAG} \
+                --target iecedge/${IMAGE}:${BRANCH_TAG}
+       done
+}
+
+echo "Push fat manifest for multi-arch images:"
+push_multi_arch
+
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/cleanup-ovn-cni.conf.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/cleanup-ovn-cni.conf.j2
new file mode 100644 (file)
index 0000000..a184074
--- /dev/null
@@ -0,0 +1,2 @@
+r /etc/cni/net.d/10-ovn-kubernetes.conf
+r /etc/origin/openvswitch/conf.db
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/ovn-setup.yaml.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/ovn-setup.yaml.j2
new file mode 100644 (file)
index 0000000..c1d81d1
--- /dev/null
@@ -0,0 +1,132 @@
+---
+# ovn-namespace.yaml
+#
+# Setup for Kubernetes to support the ovn-kubernetes plugin
+#
+# Create the namespace for ovn-kubernetes.
+#
+# This provisioning is done as part of installation after the cluster is
+# up and before the ovn daemonsets are created.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  annotations:
+    openshift.io/node-selector: "beta.kubernetes.io/os=linux"
+  name: ovn-kubernetes
+
+---
+# ovn-policy.yaml
+#
+# Setup for Kubernetes to support the ovn-kubernetes plugin
+#
+# Create the service account and policies.
+# ovnkube interacts with kubernetes and the environment
+# must be properly set up.
+# 
+# This provisioning is done as part of installation after the cluster is
+# up and before the ovn daemonsets are created.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ovn
+  namespace: ovn-kubernetes
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.k8s.io/system-only: "true"
+  name: system:ovn-reader
+rules:
+- apiGroups:
+  - ""
+  - extensions
+  resources:
+  - pods
+  - namespaces
+  - networkpolicies
+  - nodes
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - networkpolicies
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+  - update
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: ovn-reader
+roleRef:
+  name: system:ovn-reader
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: ovn
+  namespace: ovn-kubernetes
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cluster-admin-0
+roleRef:
+  name: cluster-admin
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: ovn
+  namespace: ovn-kubernetes
+
+---
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+  name: ovnkube-db
+  namespace: ovn-kubernetes
+spec:
+  ports:
+  - name: north
+    port: 6641
+    protocol: TCP
+    targetPort: 6641
+  - name: south
+    port: 6642
+    protocol: TCP
+    targetPort: 6642
+  sessionAffinity: None
+  clusterIP: None
+  type: ClusterIP
+
+---
+# The network cidr and service cidr are set in the ovn-config configmap
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: ovn-config
+  namespace: ovn-kubernetes
+data:
+  net_cidr:      "{{ net_cidr | default('10.128.0.0/14/23') }}"
+  svc_cidr:      "{{ svc_cidr | default('172.30.0.0/16') }}"
+  k8s_apiserver: "{{ k8s_apiserver.stdout }}"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db-vip.yaml.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db-vip.yaml.j2
new file mode 100644 (file)
index 0000000..bea4a9a
--- /dev/null
@@ -0,0 +1,139 @@
+# ovnkube-db HA using Corosync/Pacemaker
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons in a single container
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-db
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches the OVN NB/SB DB server in a single container.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: {{ ovn_db_replicas | default(3) }}
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-db
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-db
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      # required to be scheduled on node with openvswitch.org/ovnkube-db=true label but can
+      # only have one instance per node
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: openvswitch.org/ovnkube-db
+                operator: In
+                values:
+                - "true"
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: name
+                operator: In
+                values:
+                - ovnkube-db
+            topologyKey: kubernetes.io/hostname
+
+      containers:
+      # ovsdb with corosync in one container - v3
+      - name: ovsdb
+        image: "{{ ovn_db_vip_image | default('docker.io/ovnkube/ovndb-vip-u:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+        command: ["/root/ovndb-vip.sh", "run-ovndb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /etc/corosync
+          name: host-etc-corosync
+        - mountPath: /var/log/corosync
+          name: host-var-log-corosync
+        - mountPath: /dev/shm
+          name: dshm
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 1024Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: OVN_DB_VIP
+          value: "{{ ovn_db_vip }}"
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      volumes:
+      - name: host-var-lib-ovs
+        hostPath:
+          path: /var/lib/openvswitch
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-corosync
+        hostPath:
+          path: /var/log/corosync
+      - name: host-etc-corosync
+        hostPath:
+          path: /etc/corosync
+      - name: dshm
+        emptyDir:
+          medium: Memory
+
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db.yaml.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-db.yaml.j2
new file mode 100644 (file)
index 0000000..10523be
--- /dev/null
@@ -0,0 +1,161 @@
+# ovnkube-db
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons, each in a separate container
+# it is running on master node for now, but does not need to be the case
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-db
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches the OVN NB/SB ovsdb service components.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-db
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-db
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      containers:
+      # firewall rules for ovn - assumed to be setup
+      # iptables -A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 6641 -j ACCEPT
+      # iptables -A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 6642 -j ACCEPT
+
+      # nb-ovsdb - v3
+      - name: nb-ovsdb
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "nb-ovsdb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      # sb-ovsdb - v3
+      - name: sb-ovsdb
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "sb-ovsdb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_SB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10255
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10255
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      - name: host-var-lib-ovs
+        hostPath:
+          path: /var/lib/openvswitch
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-master.yaml.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-master.yaml.j2
new file mode 100644 (file)
index 0000000..b8bfc4f
--- /dev/null
@@ -0,0 +1,236 @@
+# ovnkube-master
+# daemonset version 3
+# starts master daemons, each in a separate container
+# it is run on the master node(s)
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-master
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This deployment launches the ovn-kubernetes networking components.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-master
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-master
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+
+      containers:
+
+      # run-ovn-northd - v3
+      - name: run-ovn-northd
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "run-ovn-northd"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["SYS_NICE"]
+
+        volumeMounts:
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NORTHD
+          value: "-vconsole:info"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10257
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10257
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      - name: run-nbctld
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "run-nbctld"]
+
+        securityContext:
+          runAsUser: 0
+
+        volumeMounts:
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+
+        ports:
+        - name: healthz
+          containerPort: 10260
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10260
+        #     scheme: HTTP
+        lifecycle:
+
+      - name: ovnkube-master
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "ovn-master"]
+
+        securityContext:
+          runAsUser: 0
+
+        volumeMounts:
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/ovn-kubernetes/
+          name: host-var-log-ovnkube
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "4"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10254
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10254
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      # TODO: check whether the dbus mount is actually needed
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-ovnkube
+        hostPath:
+          path: /var/log/ovn-kubernetes
+      - name: host-var-run-ovs
+        hostPath:
+          path: /var/run/openvswitch
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-node.yaml.j2 b/src/foundation/scripts/cni/ovn-kubernetes/templates/ovnkube-node.yaml.j2
new file mode 100644 (file)
index 0000000..8e7b181
--- /dev/null
@@ -0,0 +1,273 @@
+---
+# ovnkube-node
+# daemonset version 3
+# starts node daemons for ovs and ovn, each in a separate container
+# it is run on all nodes
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-node
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches the ovn-kubernetes networking components.
+spec:
+  selector:
+    matchLabels:
+      app: ovnkube-node
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: ovnkube-node
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      hostPID: true
+      containers:
+
+      # ovsdb-server and ovs-vswitchd daemons
+      - name: ovs-daemons
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "ovs-server"]
+
+        livenessProbe:
+          exec:
+            command:
+            - /usr/share/openvswitch/scripts/ovs-ctl
+            - status
+          initialDelaySeconds: 15
+          periodSeconds: 5
+
+        securityContext:
+          runAsUser: 0
+          # Permissions could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+
+        volumeMounts:
+        - mountPath: /lib/modules
+          name: host-modules
+          readOnly: true
+        - mountPath: /run/openvswitch
+          name: host-run-ovs
+        - mountPath: /var/run/openvswitch
+          name: host-var-run-ovs
+        - mountPath: /sys
+          name: host-sys
+          readOnly: true
+        - mountPath: /etc/openvswitch
+          name: host-config-openvswitch
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+          limits:
+            cpu: 200m
+            memory: 400Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
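+        # preStop hook below shuts the ovs-server down cleanly when the pod stops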
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/root/ovnkube.sh", "cleanup-ovs-server"]
+
+      - name: ovn-controller
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "ovn-controller"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["SYS_NICE"]
+
+        volumeMounts:
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "4"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+
+        ports:
+        - name: healthz
+          containerPort: 10258
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10258
+        #     scheme: HTTP
+        lifecycle:
+
+      - name: ovnkube-node
+        image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+        imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+
+        command: ["/root/ovnkube.sh", "ovn-node"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+
+        volumeMounts:
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/ovn-kubernetes/
+          name: host-var-log-ovnkube
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+        # We mount our socket here
+        - mountPath: /var/run/ovn-kubernetes
+          name: host-var-run-ovn-kubernetes
+        # CNI-related mounts which we take over
+        - mountPath: /opt/cni/bin
+          name: host-opt-cni-bin
+        - mountPath: /etc/cni/net.d
+          name: host-etc-cni-netd
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "5"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_GATEWAY_MODE
+          value: "{{ ovn_gateway_mode }}"
+        - name: OVN_GATEWAY_OPTS
+          value: "{{ ovn_gateway_opts }}"
+
+        ports:
+        - name: healthz
+          containerPort: 10259
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10259
+        #     scheme: HTTP
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/root/ovnkube.sh", "cleanup-ovn-node"]
+
+      nodeSelector:
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      - name: host-modules
+        hostPath:
+          path: /lib/modules
+
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-ovnkube
+        hostPath:
+          path: /var/log/ovn-kubernetes
+      - name: host-run-ovs
+        hostPath:
+          path: /run/openvswitch
+      - name: host-var-run-ovs
+        hostPath:
+          path: /var/run/openvswitch
+      - name: host-var-run-ovn-kubernetes
+        hostPath:
+          path: /var/run/ovn-kubernetes
+      - name: host-sys
+        hostPath:
+          path: /sys
+      - name: host-opt-cni-bin
+        hostPath:
+          path: /opt/cni/bin
+      - name: host-etc-cni-netd
+        hostPath:
+          path: /etc/cni/net.d
+      - name: host-config-openvswitch
+        hostPath:
+          path: /etc/origin/openvswitch
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/uninstall-ovn-k8s.sh b/src/foundation/scripts/cni/ovn-kubernetes/uninstall-ovn-k8s.sh
new file mode 100755 (executable)
index 0000000..ea3834a
--- /dev/null
@@ -0,0 +1,23 @@
+#!/bin/bash -ex
+# shellcheck disable=SC1073,SC1072,SC1039,SC2059
+
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+# Delete the ovnkube-node daemonset
+kubectl delete -f "${SCRIPTS_DIR}/yaml/ovnkube-node.yaml"
+sleep 3
+
+# Delete the ovnkube-master deployment
+kubectl delete -f "${SCRIPTS_DIR}/yaml/ovnkube-master.yaml"
+sleep 3
+
+
+# Delete the ovnkube-db deployment
+kubectl delete -f "${SCRIPTS_DIR}/yaml/ovnkube-db.yaml"
+sleep 3
+
+# Delete OVN namespace, service accounts, ovnkube-db headless service, configmap, and policies
+kubectl delete -f "${SCRIPTS_DIR}/yaml/ovn-setup.yaml"
+sleep 2
+
+#kubectl get pods -n ovn-kubernetes
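+
+# Usage sketch (assumption: run on the master node with kubectl configured):
+#   ./uninstall-ovn-k8s.sh
+# The sleep calls above give the API server a moment to finish tearing down
+# each resource before the next delete is issued.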
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db-vip.yaml b/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db-vip.yaml
new file mode 100644 (file)
index 0000000..672581e
--- /dev/null
@@ -0,0 +1,140 @@
+# yamllint disable rule:hyphens rule:commas rule:indentation
+# ovnkube-db HA using Corosync/Pacemaker
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons in a single container
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-db
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This deployment launches the OVN NB/SB DB server in a single container.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 3
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-db
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-db
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      # Required to be scheduled on a node with the openvswitch.org/ovnkube-db=true
+      # label, with at most one instance per node
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: openvswitch.org/ovnkube-db
+                operator: In
+                values:
+                - "true"
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: name
+                operator: In
+                values:
+                - ovnkube-db
+            topologyKey: kubernetes.io/hostname
+
+      containers:
+      # ovsdb with corosync in one container - v3
+      - name: ovsdb
+        image: "docker.io/ovnkube/ovndb-vip-u:latest"
+        imagePullPolicy: "IfNotPresent"
+        command: ["/root/ovndb-vip.sh", "run-ovndb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /etc/corosync
+          name: host-etc-corosync
+        - mountPath: /var/log/corosync
+          name: host-var-log-corosync
+        - mountPath: /dev/shm
+          name: dshm
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 1024Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: OVN_DB_VIP
+          value: ""
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      volumes:
+      - name: host-var-lib-ovs
+        hostPath:
+          path: /var/lib/openvswitch
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-corosync
+        hostPath:
+          path: /var/log/corosync
+      - name: host-etc-corosync
+        hostPath:
+          path: /etc/corosync
+      - name: dshm
+        emptyDir:
+          medium: Memory
+
+      tolerations:
+      - operator: "Exists"
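+
+# Note: given the nodeAffinity above, the chosen db nodes must be labelled first,
+# e.g. (illustrative): kubectl label node <node-name> openvswitch.org/ovnkube-db=true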
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db.yaml b/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-db.yaml
new file mode 100644 (file)
index 0000000..602e27b
--- /dev/null
@@ -0,0 +1,162 @@
+# yamllint disable rule:hyphens rule:commas rule:indentation
+# ovnkube-db
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons, each in a separate container
+# it runs on the master node for now, but that is not required
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-db
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This deployment launches the OVN NB/SB ovsdb service components.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-db
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-db
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      containers:
+      # firewall rules for ovn - assumed to be set up
+      # iptables -A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 6641 -j ACCEPT
+      # iptables -A OS_FIREWALL_ALLOW -p tcp -m state --state NEW -m tcp --dport 6642 -j ACCEPT
+
+      # nb-ovsdb - v3
+      - name: nb-ovsdb
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "nb-ovsdb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10256
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10256
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      # sb-ovsdb - v3
+      - name: sb-ovsdb
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "sb-ovsdb"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN"]
+
+        volumeMounts:
+        # ovn db is stored in the pod in /etc/openvswitch
+        # and on the host in /var/lib/openvswitch/
+        - mountPath: /etc/openvswitch/
+          name: host-var-lib-ovs
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_SB
+          value: "-vconsole:info -vfile:info"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10255
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10255
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      - name: host-var-lib-ovs
+        hostPath:
+          path: /var/lib/openvswitch
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-master.yaml b/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-master.yaml
new file mode 100644 (file)
index 0000000..de271b5
--- /dev/null
@@ -0,0 +1,237 @@
+# yamllint disable rule:hyphens rule:commas rule:indentation
+# ovnkube-master
+# daemonset version 3
+# starts master daemons, each in a separate container
+# it is run on the master node(s)
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-master
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This deployment launches the ovn-kubernetes networking components.
+spec:
+  progressDeadlineSeconds: 600
+  replicas: 1
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-master
+  strategy:
+    rollingUpdate:
+      maxSurge: 25%
+      maxUnavailable: 25%
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        name: ovnkube-master
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+
+      containers:
+
+      # run-ovn-northd - v3
+      - name: run-ovn-northd
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "run-ovn-northd"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["SYS_NICE"]
+
+        volumeMounts:
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVN_LOG_NORTHD
+          value: "-vconsole:info"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10257
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10257
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      - name: run-nbctld
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "run-nbctld"]
+
+        securityContext:
+          runAsUser: 0
+
+        volumeMounts:
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+
+        ports:
+        - name: healthz
+          containerPort: 10260
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10260
+        #     scheme: HTTP
+        lifecycle:
+
+      - name: ovnkube-master
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "ovn-master"]
+
+        securityContext:
+          runAsUser: 0
+
+        volumeMounts:
+        # Run directories where we need to be able to access sockets
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/ovn-kubernetes/
+          name: host-var-log-ovnkube
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "4"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        ports:
+        - name: healthz
+          containerPort: 10254
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10254
+        #     scheme: HTTP
+        lifecycle:
+      # end of container
+
+      nodeSelector:
+        node-role.kubernetes.io/master: ""
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      # TODO: check whether the dbus mount is actually needed
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-ovnkube
+        hostPath:
+          path: /var/log/ovn-kubernetes
+      - name: host-var-run-ovs
+        hostPath:
+          path: /var/run/openvswitch
+      tolerations:
+      - operator: "Exists"
diff --git a/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-node.yaml b/src/foundation/scripts/cni/ovn-kubernetes/yaml/ovnkube-node.yaml
new file mode 100644 (file)
index 0000000..3c08485
--- /dev/null
@@ -0,0 +1,274 @@
+# yamllint disable rule:hyphens rule:commas rule:indentation
+---
+# ovnkube-node
+# daemonset version 3
+# starts node daemons for ovs and ovn, each in a separate container
+# it is run on all nodes
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-node
+  # namespace set up by install
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This daemonset launches the ovn-kubernetes networking components.
+spec:
+  selector:
+    matchLabels:
+      app: ovnkube-node
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: ovnkube-node
+        component: network
+        type: infra
+        openshift.io/component: network
+        beta.kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # Requires fairly broad permissions - ability to read all services and network functions as well
+      # as all pods.
+      serviceAccountName: ovn
+      hostNetwork: true
+      hostPID: true
+      containers:
+
+      # ovsdb-server and ovs-vswitchd daemons
+      - name: ovs-daemons
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "ovs-server"]
+
+        livenessProbe:
+          exec:
+            command:
+            - /usr/share/openvswitch/scripts/ovs-ctl
+            - status
+          initialDelaySeconds: 15
+          periodSeconds: 5
+
+        securityContext:
+          runAsUser: 0
+          # Permissions could be reduced by selecting an appropriate SELinux policy
+          privileged: true
+
+        volumeMounts:
+        - mountPath: /lib/modules
+          name: host-modules
+          readOnly: true
+        - mountPath: /run/openvswitch
+          name: host-run-ovs
+        - mountPath: /var/run/openvswitch
+          name: host-var-run-ovs
+        - mountPath: /sys
+          name: host-sys
+          readOnly: true
+        - mountPath: /etc/openvswitch
+          name: host-config-openvswitch
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+          limits:
+            cpu: 200m
+            memory: 400Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/root/ovnkube.sh", "cleanup-ovs-server"]
+
+      - name: ovn-controller
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "ovn-controller"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["SYS_NICE"]
+
+        volumeMounts:
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/openvswitch/
+          name: host-var-log-ovs
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "4"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_KUBERNETES_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+
+        ports:
+        - name: healthz
+          containerPort: 10258
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10258
+        #     scheme: HTTP
+        lifecycle:
+
+      - name: ovnkube-node
+        image: "iecedge/ovn-daemonset:latest"
+        imagePullPolicy: "IfNotPresent"
+
+        command: ["/root/ovnkube.sh", "ovn-node"]
+
+        securityContext:
+          runAsUser: 0
+          capabilities:
+            add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+
+        volumeMounts:
+        - mountPath: /var/run/dbus/
+          name: host-var-run-dbus
+          readOnly: true
+        - mountPath: /var/log/ovn-kubernetes/
+          name: host-var-log-ovnkube
+        - mountPath: /var/run/openvswitch/
+          name: host-var-run-ovs
+        # We mount our socket here
+        - mountPath: /var/run/ovn-kubernetes
+          name: host-var-run-ovn-kubernetes
+        # CNI-related mounts which we take over
+        - mountPath: /opt/cni/bin
+          name: host-opt-cni-bin
+        - mountPath: /etc/cni/net.d
+          name: host-etc-cni-netd
+
+        resources:
+          requests:
+            cpu: 100m
+            memory: 300Mi
+        env:
+        - name: OVN_DAEMONSET_VERSION
+          value: "3"
+        - name: OVNKUBE_LOGLEVEL
+          value: "5"
+        - name: OVN_NET_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: net_cidr
+        - name: OVN_SVC_CIDR
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: svc_cidr
+        - name: K8S_APISERVER
+          valueFrom:
+            configMapKeyRef:
+              name: ovn-config
+              key: k8s_apiserver
+        - name: K8S_NODE
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        - name: OVN_GATEWAY_MODE
+          value: "local"
+        - name: OVN_GATEWAY_OPTS
+          value: ""
+
+        ports:
+        - name: healthz
+          containerPort: 10259
+        # TODO: Temporarily disabled until we determine how to wait for clean default
+        # config
+        # livenessProbe:
+        #   initialDelaySeconds: 10
+        #   httpGet:
+        #     path: /healthz
+        #     port: 10259
+        #     scheme: HTTP
+        lifecycle:
+          preStop:
+            exec:
+              command: ["/root/ovnkube.sh", "cleanup-ovn-node"]
+
+      nodeSelector:
+        beta.kubernetes.io/os: "linux"
+      volumes:
+      - name: host-modules
+        hostPath:
+          path: /lib/modules
+
+      - name: host-var-run-dbus
+        hostPath:
+          path: /var/run/dbus
+      - name: host-var-log-ovs
+        hostPath:
+          path: /var/log/openvswitch
+      - name: host-var-log-ovnkube
+        hostPath:
+          path: /var/log/ovn-kubernetes
+      - name: host-run-ovs
+        hostPath:
+          path: /run/openvswitch
+      - name: host-var-run-ovs
+        hostPath:
+          path: /var/run/openvswitch
+      - name: host-var-run-ovn-kubernetes
+        hostPath:
+          path: /var/run/ovn-kubernetes
+      - name: host-sys
+        hostPath:
+          path: /sys
+      - name: host-opt-cni-bin
+        hostPath:
+          path: /opt/cni/bin
+      - name: host-etc-cni-netd
+        hostPath:
+          path: /etc/cni/net.d
+      - name: host-config-openvswitch
+        hostPath:
+          path: /etc/origin/openvswitch
+      tolerations:
+      - operator: "Exists"
index f4c27b0..8c4837c 100755 (executable)
@@ -29,6 +29,7 @@ K8S_WORKER_GROUP=(
 # K8s parameter
 CLUSTER_IP=172.16.1.136 # Align with the value in our K8s setup script
 POD_NETWORK_CIDR=192.168.0.0/16
+SVC_CIDR=172.16.1.0/24
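+# Service CIDR handed through startup.sh to setup-cni.sh; it should not overlap
+# with POD_NETWORK_CIDR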
 # IEC supports four network solutions for Kubernetes: calico, flannel, contivpp and ovn-kubernetes
 CNI_TYPE=calico
 #kubernetes-cni version 0.7.5/ 0.6.0
index ab9a289..cdcf8bc 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/bash -ex
+# shellcheck source=/dev/null
 
 # For host setup as Kubernetes master
 MGMT_IP=$1
@@ -25,7 +26,8 @@ if ! kubectl get nodes; then
   fi
 
   mkdir -p "${HOME}/.kube"
-  sudo cp /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+  # shellcheck disable=SC2216
+  yes | sudo cp -rf /etc/kubernetes/admin.conf "${HOME}/.kube/config"
   sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
 
   sleep 5
index d1b27bc..fb02ff3 100755 (executable)
@@ -11,7 +11,9 @@ fi
 CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
 POD_NETWORK_CIDR=${2:-192.168.0.0/16}
 CNI_TYPE=${3:-calico}
-DEV_NAME=${4:-}
+K8S_MASTER_IP=${4:-10.169.41.173}
+SERVICE_CIDR=${5:-172.16.1.0/24}
+DEV_NAME=${6:-}
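+# Example invocation (values illustrative, matching the defaults above):
+#   ./setup-cni.sh 172.16.1.136 192.168.0.0/16 ovn-kubernetes 10.169.41.173 172.16.1.0/24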
 
 SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
 
@@ -46,6 +48,34 @@ install_contivpp(){
   kubectl apply -f "${SCRIPTS_DIR}/cni/contivpp/contiv-vpp.yaml"
 }
 
+install_ovn_kubernetes(){
+  # Render ovn-setup.yaml for ovn-kubernetes from its jinja2 template
+
+  net_cidr_repl="{{ net_cidr | default('10.128.0.0/14/23') }}"
+  svc_cidr_repl="{{ svc_cidr | default('172.30.0.0/16') }}"
+  k8s_apiserver_repl="{{ k8s_apiserver.stdout }}"
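+  # The *_repl strings are the literal jinja2 placeholders appearing in
+  # ovn-setup.yaml.j2; the sed below swaps in the real cluster values. In the
+  # net_cidr default, the trailing /23 is ovn-kubernetes' per-node host subnet
+  # length.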
+
+  k8s_apiserver="https://${K8S_MASTER_IP}:6443"
+  net_cidr="${POD_NETWORK_CIDR}"
+  svc_cidr="${SERVICE_CIDR}"
+
+  echo "net_cidr: ${net_cidr}"
+  echo "svc_cidr: ${svc_cidr}"
+  echo "k8s_apiserver: ${k8s_apiserver}"
+
+  sed "s,${net_cidr_repl},${net_cidr},
+  s,${svc_cidr_repl},${svc_cidr},
+  s,${k8s_apiserver_repl},${k8s_apiserver}," \
+  "${SCRIPTS_DIR}/cni/ovn-kubernetes/templates/ovn-setup.yaml.j2" > \
+  "${SCRIPTS_DIR}/cni/ovn-kubernetes/yaml/ovn-setup.yaml"
+
+  # Install ovn-kubernetes by yaml files
+  # shellcheck source=/dev/null
+  source "${SCRIPTS_DIR}/cni/ovn-kubernetes/install-ovn-k8s.sh"
+
+}
+
+
 case ${CNI_TYPE} in
  'calico')
         echo "Install calico ..."
@@ -59,6 +89,10 @@ case ${CNI_TYPE} in
         echo "Install Contiv-VPP ..."
         install_contivpp
         ;;
+ 'ovn-kubernetes')
+        echo "Install Ovn-Kubernetes ..."
+        install_ovn_kubernetes
+        ;;
  *)
         echo "${CNI_TYPE} is not supported"
         exit 1
index f9f137d..2bdc95f 100755 (executable)
@@ -47,7 +47,7 @@ deploy_k8s () {
            cd iec/src/foundation/scripts/ && source k8s_common.sh $KUBE_VERSION $CNI_VERSION"
 
   #Automatic deploy the K8s environments on Master node
-  SETUP_MASTER="cd iec/src/foundation/scripts/ && source k8s_master.sh ${K8S_MASTER_IP}"
+  SETUP_MASTER="cd iec/src/foundation/scripts/ && source k8s_master.sh ${K8S_MASTER_IP} ${POD_NETWORK_CIDR} ${SVC_CIDR}"
   sshpass -p ${K8S_MASTERPW} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${K8S_MASTER_IP} ${INSTALL_SOFTWARE}
   sshpass -p ${K8S_MASTERPW} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_MASTER} | tee ${LOG_FILE}
 
@@ -83,7 +83,7 @@ deploy_k8s () {
 
 
   #Deploy etcd & CNI from master node
-  SETUP_CNI="cd iec/src/foundation/scripts && source setup-cni.sh $CLUSTER_IP $POD_NETWORK_CIDR $CNI_TYPE"
+  SETUP_CNI="cd iec/src/foundation/scripts && source setup-cni.sh $CLUSTER_IP $POD_NETWORK_CIDR $CNI_TYPE $K8S_MASTER_IP $SVC_CIDR"
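+  # Positional arguments must match setup-cni.sh:
+  #   CLUSTER_IP POD_NETWORK_CIDR CNI_TYPE K8S_MASTER_IP SVC_CIDR [DEV_NAME]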
   sshpass -p ${K8S_MASTERPW} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_CNI}
   SETUP_HELM="cd iec/src/foundation/scripts && source helm.sh"
   sshpass -p ${K8S_MASTERPW} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_HELM}
index ba78713..46594c2 160000 (submodule)
@@ -1 +1 @@
-Subproject commit ba78713d21db592772a37aaf62b17fa805b145d4
+Subproject commit 46594c27e2c6add879b151c1285c052c6b67004d