--- /dev/null
+#!/bin/bash
+set -o xtrace
+set -e
+
+not_clean=${1:-}
+
+if [ -z ${not_clean} ] ; then
+ # Clean the old openvswitch db info
+ echo "Clean old ovs/ovn running dir ..."
+ sudo rm -rf /var/lib/openvswitch
+ sudo rm -rf /etc/ovn
+ sudo rm -rf /var/run/ovn-kubernetes
+ sudo rm -rf /etc/origin/openvswitch
+fi
)
}
+# shellcheck source=/dev/null
+source ${SCRIPTS_DIR}/clean_old_ovs.sh
+
# Create OVN namespace, service accounts, ovnkube-db headless service, configmap, and policies
kubectl create -f ${SCRIPTS_DIR}/yaml/ovn-setup.yaml
-wait_for 5 'test $(kubectl get svc -n ovn-kubernetes | grep ovnkube-db -c ) -eq 1'
+wait_for 5 'test $(kubectl get configmap -n ovn-kubernetes | grep ovn-config -c ) -eq 1'
# Run ovnkube-db daemonset.
apiVersion: v1
kind: Namespace
metadata:
- annotations:
- openshift.io/node-selector: "beta.kubernetes.io/os=linux"
name: ovn-kubernetes
---
name: ovn
namespace: ovn-kubernetes
+---
+# for now throw in all the privileges to run a pod. we can fine grain it further later.
+
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: ovn-kubernetes
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+spec:
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - '*'
+ fsGroup:
+ rule: RunAsAny
+ privileged: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - '*'
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65536
+
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
- annotations:
- rbac.authorization.k8s.io/system-only: "true"
- name: system:ovn-reader
+ name: ovn-kubernetes
rules:
- apiGroups:
- ""
- - extensions
resources:
- pods
- namespaces
- - networkpolicies
- nodes
- verbs:
- - get
- - list
- - watch
+ - endpoints
+ - services
+ - configmaps
+ verbs: ["get", "list", "watch"]
- apiGroups:
+ - extensions
- networking.k8s.io
+ - apps
resources:
- networkpolicies
- verbs:
- - get
- - list
- - watch
+ - statefulsets
+ verbs: ["get", "list", "watch"]
- apiGroups:
- ""
resources:
- events
- verbs:
- - create
- - patch
- - update
-
----
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRoleBinding
-metadata:
- name: ovn-reader
-roleRef:
- name: system:ovn-reader
- kind: ClusterRole
- apiGroup: rbac.authorization.k8s.io
-subjects:
-- kind: ServiceAccount
- name: ovn
- namespace: ovn-kubernetes
+ - endpoints
+ - configmaps
+ verbs: ["create", "patch", "update"]
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ - pods
+ verbs: ["patch", "update"]
+- apiGroups:
+ - extensions
+ - policy
+ resources:
+ - podsecuritypolicies
+ resourceNames:
+ - ovn-kubernetes
+ verbs: ["use"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
- name: cluster-admin-0
+ name: ovn-kubernetes
roleRef:
- name: cluster-admin
+ name: ovn-kubernetes
kind: ClusterRole
apiGroup: rbac.authorization.k8s.io
subjects:
name: ovn
namespace: ovn-kubernetes
----
-# service to expose the ovnkube-db pod
-apiVersion: v1
-kind: Service
-metadata:
- name: ovnkube-db
- namespace: ovn-kubernetes
-spec:
- ports:
- - name: north
- port: 6641
- protocol: TCP
- targetPort: 6641
- - name: south
- port: 6642
- protocol: TCP
- targetPort: 6642
- sessionAffinity: None
- clusterIP: None
- type: ClusterIP
-
---
# The network cidr and service cidr are set in the ovn-config configmap
kind: ConfigMap
name: ovn-config
namespace: ovn-kubernetes
data:
- net_cidr: "{{ net_cidr | default('10.128.0.0/14/23') }}"
- svc_cidr: "{{ svc_cidr | default('172.30.0.0/16') }}"
- k8s_apiserver: "{{ k8s_apiserver.stdout }}"
+ net_cidr: "{{ net_cidr }}"
+ svc_cidr: "{{ svc_cidr }}"
+ k8s_apiserver: "{{ k8s_apiserver }}"
+ mtu: "{{ mtu_value }}"
--- /dev/null
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ ports:
+ - name: north
+ port: 6641
+ protocol: TCP
+ targetPort: 6641
+ - name: south
+ port: 6642
+ protocol: TCP
+ targetPort: 6642
+ sessionAffinity: None
+ clusterIP: None
+ type: ClusterIP
+
+---
+
+# ovndb-raft PodDisruptionBudget to prevent majority of ovnkube raft cluster
+# nodes from disruption
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: ovndb-raft-pdb
+ namespace: ovn-kubernetes
+spec:
+ minAvailable: {{ ovn_db_minAvailable | default(2) }}
+ selector:
+ matchLabels:
+ name: ovnkube-db
+
+---
+
+# ovnkube-db raft statefulset
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons, each in a separate container
+#
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+ annotations:
+ kubernetes.io/description: |
+ This statefulset launches the OVN Northbound/Southbound Database raft clusters.
+spec:
+ serviceName: ovnkube-db
+ podManagementPolicy: "Parallel"
+ replicas: {{ ovn_db_replicas | default(3) }}
+ revisionHistoryLimit: 10
+ selector:
+ matchLabels:
+ name: ovnkube-db
+ template:
+ metadata:
+ labels:
+ name: ovnkube-db
+ component: network
+ type: infra
+ kubernetes.io/os: "linux"
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ terminationGracePeriodSeconds: 30
+ imagePullSecrets:
+ - name: registry-credentials
+ serviceAccountName: ovn
+ hostNetwork: true
+
+ # required to be scheduled on node with k8s.ovn.org/ovnkube-db=true label but can
+ # only have one instance per node
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: k8s.ovn.org/ovnkube-db
+ operator: In
+ values:
+ - "true"
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: name
+ operator: In
+ values:
+ - ovnkube-db
+ topologyKey: kubernetes.io/hostname
+
+ containers:
+ # nb-ovsdb - v3
+ - name: nb-ovsdb
+ image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+ imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+ command: ["/root/ovnkube.sh", "nb-ovsdb-raft"]
+
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db-raft"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add: ["NET_ADMIN"]
+
+ terminationMessagePolicy: FallbackToLogsOnError
+ volumeMounts:
+ # ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
+ # and on the host in /var/lib/openvswitch/
+ - mountPath: /etc/openvswitch/
+ name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
+ - mountPath: /var/log/openvswitch/
+ name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ - mountPath: /var/run/openvswitch/
+ name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 300Mi
+ env:
+ - name: OVN_DAEMONSET_VERSION
+ value: "3"
+ - name: OVN_LOGLEVEL_NB
+ value: "{{ ovn_loglevel_nb }}"
+ - name: K8S_APISERVER
+ valueFrom:
+ configMapKeyRef:
+ name: ovn-config
+ key: k8s_apiserver
+ - name: OVN_KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ # end of container
+
+ # sb-ovsdb - v3
+ - name: sb-ovsdb
+ image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+ imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+ command: ["/root/ovnkube.sh", "sb-ovsdb-raft"]
+
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db-raft"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add: ["NET_ADMIN"]
+
+ terminationMessagePolicy: FallbackToLogsOnError
+ volumeMounts:
+ # ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
+ # and on the host in /var/lib/openvswitch/
+ - mountPath: /etc/openvswitch/
+ name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
+ - mountPath: /var/log/openvswitch/
+ name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ - mountPath: /var/run/openvswitch/
+ name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 300Mi
+ env:
+ - name: OVN_DAEMONSET_VERSION
+ value: "3"
+ - name: OVN_LOGLEVEL_SB
+ value: "{{ ovn_loglevel_sb }}"
+ - name: K8S_APISERVER
+ valueFrom:
+ configMapKeyRef:
+ name: ovn-config
+ key: k8s_apiserver
+ - name: OVN_KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ # end of container
+
+ # db-metrics-exporter - v3
+ - name: db-metrics-exporter
+ image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
+ imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
+ command: ["/root/ovnkube.sh", "db-raft-metrics"]
+
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add: ["NET_ADMIN"]
+
+ terminationMessagePolicy: FallbackToLogsOnError
+ volumeMounts:
+ # ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
+ # and on the host in /var/lib/openvswitch/
+ - mountPath: /etc/openvswitch/
+ name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
+ - mountPath: /var/run/openvswitch/
+ name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
+
+ resources:
+ requests:
+ cpu: 100m
+ memory: 300Mi
+ env:
+ - name: OVN_DAEMONSET_VERSION
+ value: "3"
+ - name: K8S_APISERVER
+ valueFrom:
+ configMapKeyRef:
+ name: ovn-config
+ key: k8s_apiserver
+ - name: OVN_KUBERNETES_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ # end of container
+
+ volumes:
+ - name: host-var-log-ovs
+ hostPath:
+ path: /var/log/openvswitch
+ - name: host-var-lib-ovs
+ hostPath:
+ path: /var/lib/openvswitch
+ - name: host-var-run-ovs
+ hostPath:
+ path: /var/run/openvswitch
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
+ tolerations:
+ - operator: "Exists"
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ ports:
+ - name: north
+ port: 6641
+ protocol: TCP
+ targetPort: 6641
+ - name: south
+ port: 6642
+ protocol: TCP
+ targetPort: 6642
+ sessionAffinity: None
+ clusterIP: None
+ type: ClusterIP
+
+---
+
# ovnkube-db HA using Corosync/Pacemaker
# daemonset version 3
# starts ovn NB/SB ovsdb daemons in a single container
name: ovnkube-db
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /etc/corosync
name: host-etc-corosync
- mountPath: /var/log/corosync
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NB
- value: "-vconsole:info -vfile:info"
+ - name: OVN_LOGLEVEL_NB
+ value: "{{ ovn_loglevel_nb }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
- name: OVN_DB_VIP
value: "{{ ovn_db_vip }}"
- ports:
- - name: healthz
- containerPort: 10256
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10256
- # scheme: HTTP
- lifecycle:
# end of container
volumes:
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ ports:
+ - name: north
+ port: 6641
+ protocol: TCP
+ targetPort: 6641
+ - name: south
+ port: 6642
+ protocol: TCP
+ targetPort: 6642
+ sessionAffinity: None
+ clusterIP: None
+ type: ClusterIP
+
+---
+
# ovnkube-db
# daemonset version 3
# starts ovn NB/SB ovsdb daemons, each in a separate container
-# it is running on master node for now, but does not need to be the case
+# it is running on master for now, but does not need to be the case
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovnkube-db
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NB
- value: "-vconsole:info -vfile:info"
+ - name: OVN_LOGLEVEL_NB
+ value: "{{ ovn_loglevel_nb }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10256
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10256
- # scheme: HTTP
- lifecycle:
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
# end of container
# sb-ovsdb - v3
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_SB
- value: "-vconsole:info -vfile:info"
+ - name: OVN_LOGLEVEL_SB
+ value: "{{ ovn_loglevel_sb }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10255
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10255
- # scheme: HTTP
- lifecycle:
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+
# end of container
nodeSelector:
node-role.kubernetes.io/master: ""
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
volumes:
- name: host-var-lib-ovs
hostPath:
- name: host-var-log-ovs
hostPath:
path: /var/log/openvswitch
+ - name: host-slash
+ hostPath:
+ path: /
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
tolerations:
- operator: "Exists"
# ovnkube-master
# daemonset version 3
# starts master daemons, each in a separate container
-# it is run on the master node(s)
+# it is run on the master(s)
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: ovn-kubernetes
annotations:
kubernetes.io/description: |
- This daemonset launches the ovn-kubernetes networking components.
+ This Deployment launches the ovn-kubernetes master networking components.
spec:
progressDeadlineSeconds: 600
replicas: 1
name: ovnkube-master
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: ovn
hostNetwork: true
- containers:
+ # required to be scheduled on a linux node with node-role.kubernetes.io/master label and
+ # only one instance of ovnkube-master pod per node
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - ""
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - "linux"
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: name
+ operator: In
+ values:
+ - ovnkube-master
+ topologyKey: kubernetes.io/hostname
- # run-ovn-northd - v3
- - name: run-ovn-northd
+ containers:
+ # ovn-northd - v3
+ - name: ovn-northd
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
capabilities:
add: ["SYS_NICE"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# Run directories where we need to be able to access sockets
- mountPath: /var/run/dbus/
readOnly: true
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NORTHD
- value: "-vconsole:info"
- - name: OVN_NET_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: net_cidr
- - name: OVN_SVC_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: svc_cidr
+ - name: OVN_LOGLEVEL_NORTHD
+ value: "{{ ovn_loglevel_northd }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
- - name: K8S_NODE
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- name: OVN_KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10257
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10257
- # scheme: HTTP
- lifecycle:
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-northd"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
# end of container
- - name: run-nbctld
+ - name: nbctl-daemon
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
securityContext:
runAsUser: 0
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
-
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
cpu: 100m
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
+ - name: OVN_LOGLEVEL_NBCTLD
+ value: "{{ ovn_loglevel_nbctld }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
- ports:
- - name: healthz
- containerPort: 10260
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10258
- # scheme: HTTP
- lifecycle:
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-nbctld"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+ # end of container
- name: ovnkube-master
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
securityContext:
runAsUser: 0
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# Run directories where we need to be able to access sockets
- mountPath: /var/run/dbus/
name: host-var-log-ovnkube
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
- name: OVN_DAEMONSET_VERSION
value: "3"
- name: OVNKUBE_LOGLEVEL
- value: "4"
+ value: "{{ ovnkube_master_loglevel }}"
- name: OVN_NET_CIDR
valueFrom:
configMapKeyRef:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10254
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10254
- # scheme: HTTP
- lifecycle:
+ - name: OVN_HYBRID_OVERLAY_ENABLE
+ value: "{{ ovn_hybrid_overlay_enable }}"
+ - name: OVN_HYBRID_OVERLAY_NET_CIDR
+ value: "{{ ovn_hybrid_overlay_net_cidr }}"
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
# end of container
- nodeSelector:
- node-role.kubernetes.io/master: ""
- beta.kubernetes.io/os: "linux"
volumes:
# TODO: Need to check why we need this?
- name: host-var-run-dbus
- name: host-var-run-ovs
hostPath:
path: /var/run/openvswitch
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
tolerations:
- operator: "Exists"
--- /dev/null
+# define ServiceMonitor and Service resources for ovnkube-master, ovnkube-node,
+# and ovnkube-db (required for prometheus monitoring)
+
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ labels:
+ k8s-app: ovnkube-master
+ name: monitor-ovnkube-master
+ namespace: ovn-kubernetes
+spec:
+ endpoints:
+ - interval: 30s
+ port: http-metrics
+ scheme: http
+ path: /metrics
+ jobLabel: k8s-app
+ namespaceSelector:
+ matchNames:
+ - ovn-kubernetes
+ selector:
+ matchLabels:
+ k8s-app: ovnkube-master
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ovnkube-master
+ name: ovn-kubernetes-master-prometheus-discovery
+ namespace: ovn-kubernetes
+spec:
+ selector:
+ name: ovnkube-master
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: http-metrics
+ port: 9409
+ protocol: TCP
+ targetPort: 9409
+---
+
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ labels:
+ k8s-app: ovnkube-node
+ name: monitor-ovnkube-node
+ namespace: ovn-kubernetes
+spec:
+ endpoints:
+ - interval: 30s
+ port: http-metrics
+ path: /metrics
+ scheme: http
+ jobLabel: k8s-app
+ namespaceSelector:
+ matchNames:
+ - ovn-kubernetes
+ selector:
+ matchLabels:
+ k8s-app: ovnkube-node
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ovnkube-node
+ name: ovn-kubernetes-node-prometheus-discovery
+ namespace: ovn-kubernetes
+spec:
+ selector:
+ name: ovnkube-node
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: http-metrics
+ port: 9410
+ protocol: TCP
+ targetPort: 9410
+
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+ labels:
+ k8s-app: ovnkube-db
+ name: monitor-ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ endpoints:
+ - interval: 30s
+ port: http-metrics
+ path: /metrics
+ scheme: http
+ jobLabel: k8s-app
+ namespaceSelector:
+ matchNames:
+ - ovn-kubernetes
+ selector:
+ matchLabels:
+ k8s-app: ovnkube-db
+---
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: ovnkube-db
+ name: ovn-kubernetes-db-prometheus-discovery
+ namespace: ovn-kubernetes
+spec:
+ selector:
+ name: ovnkube-db
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: true
+ ports:
+ - name: http-metrics
+ port: 9476
+ protocol: TCP
+ targetPort: 9476
namespace: ovn-kubernetes
annotations:
kubernetes.io/description: |
- This daemonset launches the ovn-kubernetes networking components.
+ This DaemonSet launches the ovn-kubernetes networking components for worker nodes.
spec:
selector:
matchLabels:
metadata:
labels:
app: ovnkube-node
+ name: ovnkube-node
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
command:
- /usr/share/openvswitch/scripts/ovs-ctl
- status
- initialDelaySeconds: 15
- periodSeconds: 5
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovs-daemons"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
securityContext:
runAsUser: 0
# Permission could be reduced by selecting an appropriate SELinux policy
privileged: true
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /lib/modules
name: host-modules
capabilities:
add: ["SYS_NICE"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/dbus/
name: host-var-run-dbus
readOnly: true
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVNKUBE_LOGLEVEL
- value: "4"
- - name: OVN_NET_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: net_cidr
- - name: OVN_SVC_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: svc_cidr
+ - name: OVN_LOG_CONTROLLER
+ value: "{{ ovn_loglevel_controller }}"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
- - name: K8S_NODE
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- name: OVN_KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
- ports:
- - name: healthz
- containerPort: 10258
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10258
- # scheme: HTTP
- lifecycle:
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-controller"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
- name: ovnkube-node
image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
runAsUser: 0
capabilities:
add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+ {% if kind is defined and kind -%}
+ privileged: true
+ {% endif %}
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
- mountPath: /var/run/dbus/
name: host-var-run-dbus
readOnly: true
name: host-var-log-ovnkube
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
# We mount our socket here
- mountPath: /var/run/ovn-kubernetes
name: host-var-run-ovn-kubernetes
name: host-opt-cni-bin
- mountPath: /etc/cni/net.d
name: host-etc-cni-netd
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
+ {% if kind is defined and kind -%}
+ - mountPath: /var/run/netns
+ name: host-netns
+ mountPropagation: Bidirectional
+ {% endif %}
resources:
requests:
- name: OVN_DAEMONSET_VERSION
value: "3"
- name: OVNKUBE_LOGLEVEL
- value: "5"
+ value: "{{ ovnkube_node_loglevel }}"
- name: OVN_NET_CIDR
valueFrom:
configMapKeyRef:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
+ - name: OVN_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: ovn-config
+ key: mtu
- name: K8S_NODE
valueFrom:
fieldRef:
value: "{{ ovn_gateway_mode }}"
- name: OVN_GATEWAY_OPTS
value: "{{ ovn_gateway_opts }}"
+ - name: OVN_HYBRID_OVERLAY_ENABLE
+ value: "{{ ovn_hybrid_overlay_enable }}"
+ - name: OVN_HYBRID_OVERLAY_NET_CIDR
+ value: "{{ ovn_hybrid_overlay_net_cidr }}"
+ - name: OVN_SSL_ENABLE
+ value: "{{ ovn_ssl_en }}"
- ports:
- - name: healthz
- containerPort: 10259
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10259
- # scheme: HTTP
lifecycle:
preStop:
exec:
command: ["/root/ovnkube.sh", "cleanup-ovn-node"]
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnkube-node"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
nodeSelector:
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
- name: host-etc-cni-netd
hostPath:
path: /etc/cni/net.d
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
+ - name: host-slash
+ hostPath:
+ path: /
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
+ {% if kind is defined and kind -%}
+ - name: host-netns
+ hostPath:
+ path: /var/run/netns
+ {% endif %}
+
tolerations:
- operator: "Exists"
--- /dev/null
+# yamllint disable rule:hyphens rule:commas rule:indentation
+---
+# ovn-namespace.yaml
+#
+# Setup for Kubernetes to support the ovn-kubernetes plugin
+#
+# Create the namespace for ovn-kubernetes.
+#
+# This provisioning is done as part of installation after the cluster is
+# up and before the ovn daemonsets are created.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ovn-kubernetes
+
+---
+# ovn-policy.yaml
+#
+# Setup for Kubernetes to support the ovn-kubernetes plugin
+#
+# Create the service account and policies.
+# ovnkube interacts with kubernetes and the environment
+# must be properly set up.
+#
+# This provisioning is done as part of installation after the cluster is
+# up and before the ovn daemonsets are created.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ovn
+ namespace: ovn-kubernetes
+
+---
+# for now throw in all the privileges to run a pod. we can fine grain it further later.
+
+apiVersion: policy/v1beta1
+kind: PodSecurityPolicy
+metadata:
+ name: ovn-kubernetes
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+spec:
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - '*'
+ fsGroup:
+ rule: RunAsAny
+ privileged: true
+ runAsUser:
+ rule: RunAsAny
+ seLinux:
+ rule: RunAsAny
+ supplementalGroups:
+ rule: RunAsAny
+ volumes:
+ - '*'
+ hostPID: true
+ hostIPC: true
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65536
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: ovn-kubernetes
+rules:
+  # read-only access to the core API objects the ovn components consume
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+      - namespaces
+      - nodes
+      - endpoints
+      - services
+      - configmaps
+    verbs: ["get", "list", "watch"]
+  # read-only access to network policies and statefulsets
+  # (networkpolicies listed under both legacy "extensions" and
+  # "networking.k8s.io" API groups)
+  - apiGroups:
+      - extensions
+      - networking.k8s.io
+      - apps
+    resources:
+      - networkpolicies
+      - statefulsets
+    verbs: ["get", "list", "watch"]
+  # write access for events plus endpoint/configmap updates
+  - apiGroups:
+      - ""
+    resources:
+      - events
+      - endpoints
+      - configmaps
+    verbs: ["create", "patch", "update"]
+  # modify (but not create/delete) nodes and pods
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+      - pods
+    verbs: ["patch", "update"]
+  # allow use of the "ovn-kubernetes" PodSecurityPolicy defined above
+  - apiGroups:
+      - extensions
+      - policy
+    resources:
+      - podsecuritypolicies
+    resourceNames:
+      - ovn-kubernetes
+    verbs: ["use"]
+
+---
+# Bind the "ovn-kubernetes" ClusterRole to the "ovn" service account.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: ovn-kubernetes
+roleRef:
+  name: ovn-kubernetes
+  kind: ClusterRole
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: ovn
+    namespace: ovn-kubernetes
+
+---
+# The network cidr and service cidr are set in the ovn-config configmap
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: ovn-config
+  namespace: ovn-kubernetes
+data:
+  # NOTE(review): these values are template defaults; the install script
+  # rewrites net_cidr, svc_cidr, k8s_apiserver and mtu with sed at deploy
+  # time (see install_ovn_kubernetes).
+  net_cidr: "192.168.0.0/16"
+  svc_cidr: "172.16.1.0/24"
+  # assumes this hard-coded apiserver address is always substituted
+  # per-cluster before apply — verify the sed replacement matches it
+  k8s_apiserver: "https://10.169.41.225:6443"
+  mtu: "1400"
--- /dev/null
+# yamllint disable rule:hyphens rule:commas rule:indentation
+# service to expose the ovnkube-db pod
+# Headless (clusterIP: None) ClusterIP service exposing the OVN
+# northbound (6641) and southbound (6642) database ports.
+apiVersion: v1
+kind: Service
+metadata:
+  name: ovnkube-db
+  namespace: ovn-kubernetes
+spec:
+  ports:
+    - name: north
+      port: 6641
+      protocol: TCP
+      targetPort: 6641
+    - name: south
+      port: 6642
+      protocol: TCP
+      targetPort: 6642
+  sessionAffinity: None
+  clusterIP: None
+  type: ClusterIP
+
+---
+
+# ovndb-raft PodDisruptionBudget to prevent the majority of ovnkube raft
+# cluster nodes from disruption
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: ovndb-raft-pdb
+  namespace: ovn-kubernetes
+spec:
+  # keep at least 2 DB pods available so the 3-replica raft cluster
+  # retains quorum during voluntary disruptions
+  minAvailable: 2
+  selector:
+    matchLabels:
+      name: ovnkube-db
+
+---
+
+# ovnkube-db raft statefulset
+# daemonset version 3
+# starts ovn NB/SB ovsdb daemons, each in a separate container
+#
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: ovnkube-db
+  namespace: ovn-kubernetes
+  annotations:
+    kubernetes.io/description: |
+      This statefulset launches the OVN Northbound/Southbound Database raft clusters.
+spec:
+  serviceName: ovnkube-db
+  podManagementPolicy: "Parallel"
+  replicas: 3
+  revisionHistoryLimit: 10
+  selector:
+    matchLabels:
+      name: ovnkube-db
+  template:
+    metadata:
+      labels:
+        name: ovnkube-db
+        component: network
+        type: infra
+        kubernetes.io/os: "linux"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      terminationGracePeriodSeconds: 30
+      imagePullSecrets:
+        - name: registry-credentials
+      serviceAccountName: ovn
+      hostNetwork: true
+
+      # required to be scheduled on node with k8s.ovn.org/ovnkube-db=true label but can
+      # only have one instance per node
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+              - matchExpressions:
+                  - key: k8s.ovn.org/ovnkube-db
+                    operator: In
+                    values:
+                      - "true"
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            - labelSelector:
+                matchExpressions:
+                  - key: name
+                    operator: In
+                    values:
+                      - ovnkube-db
+              topologyKey: kubernetes.io/hostname
+
+      containers:
+        # nb-ovsdb - v3
+        # runs ovnkube.sh nb-ovsdb-raft (northbound DB raft member)
+        - name: nb-ovsdb
+          image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
+          imagePullPolicy: "IfNotPresent"
+          command: ["/root/ovnkube.sh", "nb-ovsdb-raft"]
+
+          readinessProbe:
+            exec:
+              command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db-raft"]
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+            periodSeconds: 60
+
+          securityContext:
+            runAsUser: 0
+            capabilities:
+              add: ["NET_ADMIN"]
+
+          terminationMessagePolicy: FallbackToLogsOnError
+          volumeMounts:
+            # ovn db is stored in the pod in /etc/openvswitch
+            # (or in /etc/ovn if OVN from new repository is used)
+            # and on the host in /var/lib/openvswitch/
+            - mountPath: /etc/openvswitch/
+              name: host-var-lib-ovs
+            - mountPath: /etc/ovn/
+              name: host-var-lib-ovs
+            - mountPath: /var/log/openvswitch/
+              name: host-var-log-ovs
+            - mountPath: /var/log/ovn/
+              name: host-var-log-ovs
+            - mountPath: /var/run/openvswitch/
+              name: host-var-run-ovs
+            - mountPath: /var/run/ovn/
+              name: host-var-run-ovs
+            - mountPath: /ovn-cert
+              name: host-ovn-cert
+              readOnly: true
+
+          resources:
+            requests:
+              cpu: 100m
+              memory: 300Mi
+          env:
+            - name: OVN_DAEMONSET_VERSION
+              value: "3"
+            - name: OVN_LOGLEVEL_NB
+              value: "-vconsole:info -vfile:info"
+            - name: K8S_APISERVER
+              valueFrom:
+                configMapKeyRef:
+                  name: ovn-config
+                  key: k8s_apiserver
+            - name: OVN_KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: K8S_NODE_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: OVN_SSL_ENABLE
+              value: "no"
+        # end of container
+
+        # sb-ovsdb - v3
+        # runs ovnkube.sh sb-ovsdb-raft (southbound DB raft member);
+        # mirrors nb-ovsdb except for the SB-specific command/probe/loglevel
+        - name: sb-ovsdb
+          image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
+          imagePullPolicy: "IfNotPresent"
+          command: ["/root/ovnkube.sh", "sb-ovsdb-raft"]
+
+          readinessProbe:
+            exec:
+              command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db-raft"]
+            initialDelaySeconds: 30
+            timeoutSeconds: 30
+            periodSeconds: 60
+
+          securityContext:
+            runAsUser: 0
+            capabilities:
+              add: ["NET_ADMIN"]
+
+          terminationMessagePolicy: FallbackToLogsOnError
+          volumeMounts:
+            # ovn db is stored in the pod in /etc/openvswitch
+            # (or in /etc/ovn if OVN from new repository is used)
+            # and on the host in /var/lib/openvswitch/
+            - mountPath: /etc/openvswitch/
+              name: host-var-lib-ovs
+            - mountPath: /etc/ovn/
+              name: host-var-lib-ovs
+            - mountPath: /var/log/openvswitch/
+              name: host-var-log-ovs
+            - mountPath: /var/log/ovn/
+              name: host-var-log-ovs
+            - mountPath: /var/run/openvswitch/
+              name: host-var-run-ovs
+            - mountPath: /var/run/ovn/
+              name: host-var-run-ovs
+            - mountPath: /ovn-cert
+              name: host-ovn-cert
+              readOnly: true
+
+          resources:
+            requests:
+              cpu: 100m
+              memory: 300Mi
+          env:
+            - name: OVN_DAEMONSET_VERSION
+              value: "3"
+            - name: OVN_LOGLEVEL_SB
+              value: "-vconsole:info -vfile:info"
+            - name: K8S_APISERVER
+              valueFrom:
+                configMapKeyRef:
+                  name: ovn-config
+                  key: k8s_apiserver
+            - name: OVN_KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: K8S_NODE_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.hostIP
+            - name: OVN_SSL_ENABLE
+              value: "no"
+        # end of container
+
+        # db-metrics-exporter - v3
+        # runs ovnkube.sh db-raft-metrics; no readiness probe and no log
+        # mounts — only the DB/run directories and the cert mount
+        - name: db-metrics-exporter
+          image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
+          imagePullPolicy: "IfNotPresent"
+          command: ["/root/ovnkube.sh", "db-raft-metrics"]
+
+          securityContext:
+            runAsUser: 0
+            capabilities:
+              add: ["NET_ADMIN"]
+
+          terminationMessagePolicy: FallbackToLogsOnError
+          volumeMounts:
+            # ovn db is stored in the pod in /etc/openvswitch
+            # (or in /etc/ovn if OVN from new repository is used)
+            # and on the host in /var/lib/openvswitch/
+            - mountPath: /etc/openvswitch/
+              name: host-var-lib-ovs
+            - mountPath: /etc/ovn/
+              name: host-var-lib-ovs
+            - mountPath: /var/run/openvswitch/
+              name: host-var-run-ovs
+            - mountPath: /var/run/ovn/
+              name: host-var-run-ovs
+            - mountPath: /ovn-cert
+              name: host-ovn-cert
+              readOnly: true
+
+          resources:
+            requests:
+              cpu: 100m
+              memory: 300Mi
+          env:
+            - name: OVN_DAEMONSET_VERSION
+              value: "3"
+            - name: K8S_APISERVER
+              valueFrom:
+                configMapKeyRef:
+                  name: ovn-config
+                  key: k8s_apiserver
+            - name: OVN_KUBERNETES_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: OVN_SSL_ENABLE
+              value: "no"
+        # end of container
+
+      # host paths shared by all three containers; legacy and new OVN
+      # directory layouts are both mapped onto the same host dirs
+      volumes:
+        - name: host-var-log-ovs
+          hostPath:
+            path: /var/log/openvswitch
+        - name: host-var-lib-ovs
+          hostPath:
+            path: /var/lib/openvswitch
+        - name: host-var-run-ovs
+          hostPath:
+            path: /var/run/openvswitch
+        - name: host-ovn-cert
+          hostPath:
+            path: /etc/ovn
+            type: DirectoryOrCreate
+      tolerations:
+        - operator: "Exists"
# yamllint disable rule:hyphens rule:commas rule:indentation
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ ports:
+ - name: north
+ port: 6641
+ protocol: TCP
+ targetPort: 6641
+ - name: south
+ port: 6642
+ protocol: TCP
+ targetPort: 6642
+ sessionAffinity: None
+ clusterIP: None
+ type: ClusterIP
+
+---
+
# ovnkube-db HA using Corosync/Pacemaker
# daemonset version 3
# starts ovn NB/SB ovsdb daemons in a single container
name: ovnkube-db
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /etc/corosync
name: host-etc-corosync
- mountPath: /var/log/corosync
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NB
+ - name: OVN_LOGLEVEL_NB
value: "-vconsole:info -vfile:info"
- name: K8S_APISERVER
valueFrom:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
- name: OVN_DB_VIP
value: ""
- ports:
- - name: healthz
- containerPort: 10256
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10256
- # scheme: HTTP
- lifecycle:
# end of container
volumes:
# yamllint disable rule:hyphens rule:commas rule:indentation
+# service to expose the ovnkube-db pod
+apiVersion: v1
+kind: Service
+metadata:
+ name: ovnkube-db
+ namespace: ovn-kubernetes
+spec:
+ ports:
+ - name: north
+ port: 6641
+ protocol: TCP
+ targetPort: 6641
+ - name: south
+ port: 6642
+ protocol: TCP
+ targetPort: 6642
+ sessionAffinity: None
+ clusterIP: None
+ type: ClusterIP
+
+---
+
# ovnkube-db
# daemonset version 3
# starts ovn NB/SB ovsdb daemons, each in a separate container
-# it is running on master node for now, but does not need to be the case
+# it is running on master for now, but does not need to be the case
kind: Deployment
apiVersion: apps/v1
metadata:
name: ovnkube-db
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# nb-ovsdb - v3
- name: nb-ovsdb
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "nb-ovsdb"]
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NB
+ - name: OVN_LOGLEVEL_NB
value: "-vconsole:info -vfile:info"
- name: K8S_APISERVER
valueFrom:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10256
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10256
- # scheme: HTTP
- lifecycle:
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "no"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnnb-db"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
# end of container
# sb-ovsdb - v3
- name: sb-ovsdb
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "sb-ovsdb"]
capabilities:
add: ["NET_ADMIN"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# ovn db is stored in the pod in /etc/openvswitch
+ # (or in /etc/ovn if OVN from new repository is used)
# and on the host in /var/lib/openvswitch/
- mountPath: /etc/openvswitch/
name: host-var-lib-ovs
+ - mountPath: /etc/ovn/
+ name: host-var-lib-ovs
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_SB
+ - name: OVN_LOGLEVEL_SB
value: "-vconsole:info -vfile:info"
- name: K8S_APISERVER
valueFrom:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10255
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10255
- # scheme: HTTP
- lifecycle:
+ - name: K8S_NODE_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.hostIP
+ - name: OVN_SSL_ENABLE
+ value: "no"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnsb-db"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+
# end of container
nodeSelector:
node-role.kubernetes.io/master: ""
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
volumes:
- name: host-var-lib-ovs
hostPath:
- name: host-var-log-ovs
hostPath:
path: /var/log/openvswitch
+ - name: host-slash
+ hostPath:
+ path: /
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
tolerations:
- operator: "Exists"
# ovnkube-master
# daemonset version 3
# starts master daemons, each in a separate container
-# it is run on the master node(s)
+# it is run on the master(s)
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: ovn-kubernetes
annotations:
kubernetes.io/description: |
- This daemonset launches the ovn-kubernetes networking components.
+ This Deployment launches the ovn-kubernetes master networking components.
spec:
progressDeadlineSeconds: 600
replicas: 1
name: ovnkube-master
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: ovn
hostNetwork: true
- containers:
+ # required to be scheduled on a linux node with node-role.kubernetes.io/master label and
+ # only one instance of ovnkube-master pod per node
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: node-role.kubernetes.io/master
+ operator: In
+ values:
+ - ""
+ - key: kubernetes.io/os
+ operator: In
+ values:
+ - "linux"
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchExpressions:
+ - key: name
+ operator: In
+ values:
+ - ovnkube-master
+ topologyKey: kubernetes.io/hostname
- # run-ovn-northd - v3
- - name: run-ovn-northd
- image: "iecedge/ovn-daemonset:latest"
+ containers:
+ # ovn-northd - v3
+ - name: ovn-northd
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "run-ovn-northd"]
capabilities:
add: ["SYS_NICE"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# Run directories where we need to be able to access sockets
- mountPath: /var/run/dbus/
readOnly: true
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVN_LOG_NORTHD
- value: "-vconsole:info"
- - name: OVN_NET_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: net_cidr
- - name: OVN_SVC_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: svc_cidr
+ - name: OVN_LOGLEVEL_NORTHD
+ value: "-vconsole:info -vfile:info"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
- - name: K8S_NODE
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- name: OVN_KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10257
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10257
- # scheme: HTTP
- lifecycle:
+ - name: OVN_SSL_ENABLE
+ value: "no"
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-northd"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
# end of container
- - name: run-nbctld
- image: "iecedge/ovn-daemonset:latest"
+ - name: nbctl-daemon
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "run-nbctld"]
securityContext:
runAsUser: 0
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
-
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
cpu: 100m
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
+ - name: OVN_LOGLEVEL_NBCTLD
+ value: "-vconsole:info"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
+ - name: OVN_SSL_ENABLE
+ value: "no"
- ports:
- - name: healthz
- containerPort: 10260
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10258
- # scheme: HTTP
- lifecycle:
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-nbctld"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+ # end of container
- name: ovnkube-master
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "ovn-master"]
securityContext:
runAsUser: 0
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
# Run directories where we need to be able to access sockets
- mountPath: /var/run/dbus/
name: host-var-log-ovnkube
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- ports:
- - name: healthz
- containerPort: 10254
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10254
- # scheme: HTTP
- lifecycle:
+ - name: OVN_HYBRID_OVERLAY_ENABLE
+ value: ""
+ - name: OVN_HYBRID_OVERLAY_NET_CIDR
+ value: ""
+ - name: OVN_SSL_ENABLE
+ value: "no"
# end of container
- nodeSelector:
- node-role.kubernetes.io/master: ""
- beta.kubernetes.io/os: "linux"
volumes:
# TODO: Need to check why we need this?
- name: host-var-run-dbus
- name: host-var-run-ovs
hostPath:
path: /var/run/openvswitch
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
tolerations:
- operator: "Exists"
--- /dev/null
+# yamllint disable rule:hyphens rule:commas rule:indentation
+# define ServiceMonitor and Service resources for ovnkube-master, ovnkube-node,
+# and ovnkube-db (required for prometheus monitoring)
+
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    k8s-app: ovnkube-master
+  name: monitor-ovnkube-master
+  namespace: ovn-kubernetes
+spec:
+  endpoints:
+    - interval: 30s
+      port: http-metrics
+      scheme: http
+      path: /metrics
+  jobLabel: k8s-app
+  namespaceSelector:
+    matchNames:
+      - ovn-kubernetes
+  selector:
+    matchLabels:
+      k8s-app: ovnkube-master
+---
+# headless discovery service for ovnkube-master metrics (port 9409);
+# publishNotReadyAddresses keeps not-ready pods scrapeable
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: ovnkube-master
+  name: ovn-kubernetes-master-prometheus-discovery
+  namespace: ovn-kubernetes
+spec:
+  selector:
+    name: ovnkube-master
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: http-metrics
+      port: 9409
+      protocol: TCP
+      targetPort: 9409
+---
+
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    k8s-app: ovnkube-node
+  name: monitor-ovnkube-node
+  namespace: ovn-kubernetes
+spec:
+  endpoints:
+    - interval: 30s
+      port: http-metrics
+      path: /metrics
+      scheme: http
+  jobLabel: k8s-app
+  namespaceSelector:
+    matchNames:
+      - ovn-kubernetes
+  selector:
+    matchLabels:
+      k8s-app: ovnkube-node
+---
+# headless discovery service for ovnkube-node metrics (port 9410)
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: ovnkube-node
+  name: ovn-kubernetes-node-prometheus-discovery
+  namespace: ovn-kubernetes
+spec:
+  selector:
+    name: ovnkube-node
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: http-metrics
+      port: 9410
+      protocol: TCP
+      targetPort: 9410
+
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    k8s-app: ovnkube-db
+  name: monitor-ovnkube-db
+  namespace: ovn-kubernetes
+spec:
+  endpoints:
+    - interval: 30s
+      port: http-metrics
+      path: /metrics
+      scheme: http
+  jobLabel: k8s-app
+  namespaceSelector:
+    matchNames:
+      - ovn-kubernetes
+  selector:
+    matchLabels:
+      k8s-app: ovnkube-db
+---
+# headless discovery service for ovnkube-db metrics (port 9476)
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    k8s-app: ovnkube-db
+  name: ovn-kubernetes-db-prometheus-discovery
+  namespace: ovn-kubernetes
+spec:
+  selector:
+    name: ovnkube-db
+  type: ClusterIP
+  clusterIP: None
+  publishNotReadyAddresses: true
+  ports:
+    - name: http-metrics
+      port: 9476
+      protocol: TCP
+      targetPort: 9476
namespace: ovn-kubernetes
annotations:
kubernetes.io/description: |
- This daemonset launches the ovn-kubernetes networking components.
+ This DaemonSet launches the ovn-kubernetes networking components for worker nodes.
spec:
selector:
matchLabels:
metadata:
labels:
app: ovnkube-node
+ name: ovnkube-node
component: network
type: infra
- openshift.io/component: network
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# ovsdb-server and ovs-switchd daemons
- name: ovs-daemons
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "ovs-server"]
command:
- /usr/share/openvswitch/scripts/ovs-ctl
- status
- initialDelaySeconds: 15
- periodSeconds: 5
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovs-daemons"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
securityContext:
runAsUser: 0
# Permission could be reduced by selecting an appropriate SELinux policy
privileged: true
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /lib/modules
name: host-modules
command: ["/root/ovnkube.sh", "cleanup-ovs-server"]
- name: ovn-controller
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "ovn-controller"]
capabilities:
add: ["SYS_NICE"]
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- mountPath: /var/run/dbus/
name: host-var-run-dbus
readOnly: true
- mountPath: /var/log/openvswitch/
name: host-var-log-ovs
+ - mountPath: /var/log/ovn/
+ name: host-var-log-ovs
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
resources:
requests:
env:
- name: OVN_DAEMONSET_VERSION
value: "3"
- - name: OVNKUBE_LOGLEVEL
- value: "4"
- - name: OVN_NET_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: net_cidr
- - name: OVN_SVC_CIDR
- valueFrom:
- configMapKeyRef:
- name: ovn-config
- key: svc_cidr
+ - name: OVN_LOG_CONTROLLER
+ value: "-vconsole:info"
- name: K8S_APISERVER
valueFrom:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
- - name: K8S_NODE
- valueFrom:
- fieldRef:
- fieldPath: spec.nodeName
- name: OVN_KUBERNETES_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
+ - name: OVN_SSL_ENABLE
+ value: "no"
- ports:
- - name: healthz
- containerPort: 10258
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10258
- # scheme: HTTP
- lifecycle:
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovn-controller"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
- name: ovnkube-node
- image: "iecedge/ovn-daemonset:latest"
+ image: "iecedge/ovn-daemonset-ubuntu:2020-04-16"
imagePullPolicy: "IfNotPresent"
command: ["/root/ovnkube.sh", "ovn-node"]
capabilities:
add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
+
+ terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
+ # for the iptables wrapper
+ - mountPath: /host
+ name: host-slash
+ readOnly: true
- mountPath: /var/run/dbus/
name: host-var-run-dbus
readOnly: true
name: host-var-log-ovnkube
- mountPath: /var/run/openvswitch/
name: host-var-run-ovs
+ - mountPath: /var/run/ovn/
+ name: host-var-run-ovs
# We mount our socket here
- mountPath: /var/run/ovn-kubernetes
name: host-var-run-ovn-kubernetes
name: host-opt-cni-bin
- mountPath: /etc/cni/net.d
name: host-etc-cni-netd
+ - mountPath: /ovn-cert
+ name: host-ovn-cert
+ readOnly: true
+
resources:
requests:
- name: OVN_DAEMONSET_VERSION
value: "3"
- name: OVNKUBE_LOGLEVEL
- value: "5"
+ value: "4"
- name: OVN_NET_CIDR
valueFrom:
configMapKeyRef:
configMapKeyRef:
name: ovn-config
key: k8s_apiserver
+ - name: OVN_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: ovn-config
+ key: mtu
- name: K8S_NODE
valueFrom:
fieldRef:
value: "local"
- name: OVN_GATEWAY_OPTS
value: ""
+ - name: OVN_HYBRID_OVERLAY_ENABLE
+ value: ""
+ - name: OVN_HYBRID_OVERLAY_NET_CIDR
+ value: ""
+ - name: OVN_SSL_ENABLE
+ value: "no"
- ports:
- - name: healthz
- containerPort: 10259
- # TODO: Temporarily disabled until we determine how to wait for clean default
- # config
- # livenessProbe:
- # initialDelaySeconds: 10
- # httpGet:
- # path: /healthz
- # port: 10259
- # scheme: HTTP
lifecycle:
preStop:
exec:
command: ["/root/ovnkube.sh", "cleanup-ovn-node"]
+ readinessProbe:
+ exec:
+ command: ["/usr/bin/ovn-kube-util", "readiness-probe", "-t", "ovnkube-node"]
+ initialDelaySeconds: 30
+ timeoutSeconds: 30
+ periodSeconds: 60
nodeSelector:
- beta.kubernetes.io/os: "linux"
+ kubernetes.io/os: "linux"
volumes:
- name: host-modules
hostPath:
- name: host-etc-cni-netd
hostPath:
path: /etc/cni/net.d
+ - name: host-ovn-cert
+ hostPath:
+ path: /etc/ovn
+ type: DirectoryOrCreate
+ - name: host-slash
+ hostPath:
+ path: /
- name: host-config-openvswitch
hostPath:
path: /etc/origin/openvswitch
+
+
tolerations:
- operator: "Exists"
install_ovn_kubernetes(){
# Update the ovn-kubernetes yaml files
- net_cidr_repl="{{ net_cidr | default('10.128.0.0/14/23') }}"
- svc_cidr_repl="{{ svc_cidr | default('172.30.0.0/16') }}"
- k8s_apiserver_repl="{{ k8s_apiserver.stdout }}"
+ net_cidr_repl="{{ net_cidr }}"
+ svc_cidr_repl="{{ svc_cidr }}"
+ k8s_apiserver_repl="{{ k8s_apiserver }}"
+ mtu_repl="{{ mtu_value }}"
k8s_apiserver="https://${K8S_MASTER_IP}:6443"
net_cidr="${POD_NETWORK_CIDR}"
svc_cidr="${SERVICE_CIDR}"
+ mtu_def_value=1400
echo "net_cidr: ${net_cidr}"
echo "svc_cidr: ${svc_cidr}"
echo "k8s_apiserver: ${k8s_apiserver}"
+ echo "mtu: ${mtu_def_value}"
sed "s,${net_cidr_repl},${net_cidr},
s,${svc_cidr_repl},${svc_cidr},
- s,${k8s_apiserver_repl},${k8s_apiserver}," \
+ s,${k8s_apiserver_repl},${k8s_apiserver},
+ s,${mtu_repl},${mtu_def_value}," \
${SCRIPTS_DIR}/cni/ovn-kubernetes/templates/ovn-setup.yaml.j2 > \
${SCRIPTS_DIR}/cni/ovn-kubernetes/yaml/ovn-setup.yaml
CILIUM_CONFIG="cd iec/src/foundation/scripts/cni/cilium && sudo ./cilium_install.sh"
sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} $CILIUM_CONFIG
;;
+ ovn-kubernetes)
+ OVN_KUBERNETES_PRECONFIG="cd iec/src/foundation/scripts/cni/ovn-kubernetes && ./clean_old_ovs.sh"
+ sshpass -p ${passwd} ssh -o StrictHostKeyChecking=no ${HOST_USER}@${ip_addr} $OVN_KUBERNETES_PRECONFIG
+ ;;
*)
;;
esac