---
# ovnkube-node
# daemonset version 3
# starts node daemons for ovs and ovn, each in a separate container
# it is run on all nodes
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: ovnkube-node
  # namespace set up by install
  namespace: ovn-kubernetes
  annotations:
    kubernetes.io/description: |
      This daemonset launches the ovn-kubernetes networking components.
spec:
  selector:
    matchLabels:
      app: ovnkube-node
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: ovnkube-node
        component: network
        type: infra
        openshift.io/component: network
        beta.kubernetes.io/os: "linux"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # Requires fairly broad permissions - ability to read all services and network functions as well
      # as all pods.
      serviceAccountName: ovn
      hostNetwork: true
      hostPID: true
      containers:
        # ovsdb-server and ovs-switchd daemons
        - name: ovs-daemons
          image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
          imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
          command: ["/root/ovnkube.sh", "ovs-server"]
          livenessProbe:
            exec:
              command:
                - /usr/share/openvswitch/scripts/ovs-ctl
                - status
            initialDelaySeconds: 15
            periodSeconds: 5
          securityContext:
            runAsUser: 0
            # Permission could be reduced by selecting an appropriate SELinux policy
            privileged: true
          volumeMounts:
            - mountPath: /lib/modules
              name: host-modules
              readOnly: true
            - mountPath: /run/openvswitch
              name: host-run-ovs
            - mountPath: /var/run/openvswitch
              name: host-var-run-ovs
            - mountPath: /sys
              name: host-sys
              readOnly: true
            - mountPath: /etc/openvswitch
              name: host-config-openvswitch
          resources:
            requests:
              cpu: 100m
              memory: 300Mi
            limits:
              cpu: 200m
              memory: 400Mi
          env:
            - name: OVN_DAEMONSET_VERSION
              value: "3"
            - name: K8S_APISERVER
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: k8s_apiserver
          lifecycle:
            preStop:
              exec:
                command: ["/root/ovnkube.sh", "cleanup-ovs-server"]

        - name: ovn-controller
          image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
          imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
          command: ["/root/ovnkube.sh", "ovn-controller"]
          securityContext:
            runAsUser: 0
            capabilities:
              add: ["SYS_NICE"]
          volumeMounts:
            - mountPath: /var/run/dbus/
              name: host-var-run-dbus
              readOnly: true
            - mountPath: /var/log/openvswitch/
              name: host-var-log-ovs
            - mountPath: /var/run/openvswitch/
              name: host-var-run-ovs
          resources:
            requests:
              cpu: 100m
              memory: 300Mi
          env:
            - name: OVN_DAEMONSET_VERSION
              value: "3"
            - name: OVNKUBE_LOGLEVEL
              value: "4"
            - name: OVN_NET_CIDR
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: net_cidr
            - name: OVN_SVC_CIDR
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: svc_cidr
            - name: K8S_APISERVER
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: k8s_apiserver
            - name: K8S_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: OVN_KUBERNETES_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: healthz
              containerPort: 10258
          # TODO: Temporarily disabled until we determine how to wait for clean default
          # config
          # livenessProbe:
          #   initialDelaySeconds: 10
          #   httpGet:
          #     path: /healthz
          #     port: 10258
          #     scheme: HTTP
          # NOTE(review): empty lifecycle (parses as null) kept from the original;
          # no preStop/postStart hooks are defined for this container
          lifecycle:

        - name: ovnkube-node
          image: "{{ ovn_image | default('docker.io/ovnkube/ovn-daemonset:latest') }}"
          imagePullPolicy: "{{ ovn_image_pull_policy | default('IfNotPresent') }}"
          command: ["/root/ovnkube.sh", "ovn-node"]
          securityContext:
            runAsUser: 0
            capabilities:
              add: ["NET_ADMIN", "SYS_ADMIN", "SYS_PTRACE"]
          volumeMounts:
            - mountPath: /var/run/dbus/
              name: host-var-run-dbus
              readOnly: true
            - mountPath: /var/log/ovn-kubernetes/
              name: host-var-log-ovnkube
            - mountPath: /var/run/openvswitch/
              name: host-var-run-ovs
            # We mount our socket here
            - mountPath: /var/run/ovn-kubernetes
              name: host-var-run-ovn-kubernetes
            # CNI related mounts which we take over
            - mountPath: /opt/cni/bin
              name: host-opt-cni-bin
            - mountPath: /etc/cni/net.d
              name: host-etc-cni-netd
          resources:
            requests:
              cpu: 100m
              memory: 300Mi
          env:
            - name: OVN_DAEMONSET_VERSION
              value: "3"
            - name: OVNKUBE_LOGLEVEL
              value: "5"
            - name: OVN_NET_CIDR
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: net_cidr
            - name: OVN_SVC_CIDR
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: svc_cidr
            - name: K8S_APISERVER
              valueFrom:
                configMapKeyRef:
                  name: ovn-config
                  key: k8s_apiserver
            - name: K8S_NODE
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: OVN_GATEWAY_MODE
              value: "{{ ovn_gateway_mode }}"
            - name: OVN_GATEWAY_OPTS
              value: "{{ ovn_gateway_opts }}"
          ports:
            - name: healthz
              containerPort: 10259
          # TODO: Temporarily disabled until we determine how to wait for clean default
          # config
          # livenessProbe:
          #   initialDelaySeconds: 10
          #   httpGet:
          #     path: /healthz
          #     port: 10259
          #     scheme: HTTP
          lifecycle:
            preStop:
              exec:
                command: ["/root/ovnkube.sh", "cleanup-ovn-node"]

      nodeSelector:
        beta.kubernetes.io/os: "linux"
      volumes:
        - name: host-modules
          hostPath:
            path: /lib/modules
        - name: host-var-run-dbus
          hostPath:
            path: /var/run/dbus
        - name: host-var-log-ovs
          hostPath:
            path: /var/log/openvswitch
        - name: host-var-log-ovnkube
          hostPath:
            path: /var/log/ovn-kubernetes
        - name: host-run-ovs
          hostPath:
            path: /run/openvswitch
        - name: host-var-run-ovs
          hostPath:
            path: /var/run/openvswitch
        - name: host-var-run-ovn-kubernetes
          hostPath:
            path: /var/run/ovn-kubernetes
        - name: host-sys
          hostPath:
            path: /sys
        - name: host-opt-cni-bin
          hostPath:
            path: /opt/cni/bin
        - name: host-etc-cni-netd
          hostPath:
            path: /etc/cni/net.d
        - name: host-config-openvswitch
          hostPath:
            path: /etc/origin/openvswitch
      tolerations:
        - operator: "Exists"