Add KubeVirt testing using PV
author Todd Malsbary <todd.malsbary@intel.com>
Tue, 17 May 2022 18:26:43 +0000 (11:26 -0700)
committer Todd Malsbary <todd.malsbary@intel.com>
Tue, 17 May 2022 18:26:43 +0000 (11:26 -0700)
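
Boot a Fedora cloud image in a KubeVirt VirtualMachine backed by a
cstor-csi-disk DataVolume, expose SSH on a NodePort service, and wait
until the VM answers an sshpass login.  Logging in directly over the
NodePort avoids the KUD kubevirt self-test's dependency on
AllowTcpForwarding in /etc/ssh/sshd_config, which the OS security
hardening disables, so the TODO about that is dropped.  The extra
cStor disk on each vagrant machine grows from 10 to 16 GB to make
room for the test VM's 5 Gi root volume.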
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: I51afa7a84787238f0a006228a04471ba86bb83f1

Vagrantfile
deploy/addons/addons.sh
deploy/addons/kubevirt-test.yaml [new file with mode: 0644]

diff --git a/Vagrantfile b/Vagrantfile
index c97adfb..3570898 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -197,7 +197,7 @@ Vagrant.configure("2") do |config|
           # disk for the machine
           libvirt.storage :file, :size => 50, :type => 'raw', :cache => 'none'
           # Create an additional disk for cStor
-          libvirt.storage :file, :size => 10, :type => 'raw', :cache => 'none'
+          libvirt.storage :file, :size => 16, :type => 'raw', :cache => 'none'
           # Management attach is false so that vagrant will not interfere
           # with these machines: the jump server will manage them
           # completely
diff --git a/deploy/addons/addons.sh b/deploy/addons/addons.sh
index 672096e..fcaafc9 100755
--- a/deploy/addons/addons.sh
+++ b/deploy/addons/addons.sh
@@ -113,6 +113,27 @@ function test_openebs {
     kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/openebs-cstor.yaml
 }
 
+function is_vm_reachable {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    local -r node_port=$(kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test get service/test-vm-service -o jsonpath='{.spec.ports[].nodePort}')
+    local -r node=$(kubectl -n metal3 get cluster/${cluster_name} -o jsonpath='{.spec.controlPlaneEndpoint.host}')
+    sshpass -p testuser ssh testuser@${node} -p ${node_port} -- uptime
+}
+
+function test_kubevirt {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    kubectl --kubeconfig=${cluster_kubeconfig} create ns kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test create rolebinding psp:privileged-kubevirt-test --clusterrole=psp:privileged --group=system:serviceaccounts:kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} apply -f ${SCRIPTDIR}/kubevirt-test.yaml
+    WAIT_FOR_TRIES=30
+    wait_for is_vm_reachable
+    kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/kubevirt-test.yaml
+    kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test delete rolebinding psp:privileged-kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} delete ns kubevirt-test
+}
+
 function test_addons {
     install_deps
 
@@ -131,10 +152,6 @@ function test_addons {
     pushd ${KUDPATH}/kud/tests
     failed_tests=""
     container_runtime=$(KUBECONFIG=${cluster_kubeconfig} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
-    # TODO Temporarily remove kubevirt from kud_tests below.  The
-    # kubevirt self-test needs AllowTcpForwarding yes in
-    # /etc/ssh/sshd_config which is currently disabled by the OS
-    # security hardening.
     if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
         # With containerd 1.2.13, the qat test container image fails to unpack.
         kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:nodus-network nfd:node-feature-discovery sriov-network:sriov-network cmk:cpu-manager"
@@ -156,6 +173,7 @@ function test_addons {
     popd
 
     test_openebs || failed_tests="${failed_tests} openebs"
+    test_kubevirt || failed_tests="${failed_tests} kubevirt"
 
     if [[ ! -z "$failed_tests" ]]; then
         echo "Test cases failed:${failed_tests}"
diff --git a/deploy/addons/kubevirt-test.yaml b/deploy/addons/kubevirt-test.yaml
new file mode 100644
index 0000000..cc96f79
--- /dev/null
+++ b/deploy/addons/kubevirt-test.yaml
@@ -0,0 +1,81 @@
+---
+apiVersion: kubevirt.io/v1
+kind: VirtualMachine
+metadata:
+  name: test-vm
+  namespace: kubevirt-test
+spec:
+  running: true
+  template:
+    metadata:
+      labels:
+        app: test-vm
+    spec:
+      domain:
+        cpu:
+          model: host-passthrough
+        devices:
+          disks:
+          - disk:
+              bus: virtio
+            name: rootfs
+          - disk:
+              bus: virtio
+            name: cloudinit
+          interfaces:
+          - name: default
+            masquerade: {}
+        resources:
+          requests:
+            memory: 256M
+      networks:
+      - name: default
+        pod: {}
+      volumes:
+        - name: rootfs
+          dataVolume:
+            name: fedora-dv
+        - name: cloudinit
+          cloudInitNoCloud:
+            userData: |
+              #cloud-config
+              ssh_pwauth: True
+              users:
+              - name: testuser
+                gecos: User
+                primary-group: testuser
+                groups: users
+                lock_passwd: false
+                shell: /bin/bash
+                # the password is "testuser"
+                passwd: "$6$rounds=4096$wPs4Hz4tfs$a8ssMnlvH.3GX88yxXKF2cKMlVULsnydoOKgkuStTErTq2dzKZiIx9R/pPWWh5JLxzoZEx7lsSX5T2jW5WISi1"
+                sudo: ALL=(ALL) NOPASSWD:ALL
+              runcmd:
+                - echo hello world
+  dataVolumeTemplates:
+  - metadata:
+      name: fedora-dv
+    spec:
+      pvc:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: 5Gi
+        storageClassName: cstor-csi-disk
+      source:
+        http:
+          url: https://sjc.edge.kernel.org/fedora-buffet/fedora/linux/releases/33/Cloud/x86_64/images/Fedora-Cloud-Base-33-1.2.x86_64.qcow2
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: test-vm-service
+  namespace: kubevirt-test
+spec:
+  type: NodePort
+  selector:
+    app: test-vm
+  ports:
+  - name: ssh
+    port: 22
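
For debugging a failed run, the steps above can be exercised by hand.
A minimal sketch, assuming the default "icn" cluster name, that
sshpass is installed, and that the KubeVirt and CDI CRDs are present
(they provide the vmi and dv short names):

    cluster_kubeconfig=${BUILDDIR}/icn.conf
    # Watch the DataVolume import and the VM boot.
    kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test get dv,vmi,pods
    # Resolve the SSH endpoint the same way is_vm_reachable does.
    node_port=$(kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test \
        get service/test-vm-service -o jsonpath='{.spec.ports[].nodePort}')
    node=$(kubectl -n metal3 get cluster/icn -o jsonpath='{.spec.controlPlaneEndpoint.host}')
    # The password is "testuser", per the cloud-init userData above.
    sshpass -p testuser ssh testuser@${node} -p ${node_port} -- uptime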