Add kubevirt to e2e tested addons 81/4381/1
author Todd Malsbary <todd.malsbary@intel.com>
Thu, 22 Jul 2021 21:16:55 +0000 (14:16 -0700)
committer Todd Malsbary <todd.malsbary@intel.com>
Wed, 28 Jul 2021 22:31:40 +0000 (15:31 -0700)
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: I100f2ec09b88a12c29740220bb3ff33a350596e2

cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
cmd/bpa-operator/e2etest/bpa_vm_verifier.sh

cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
index 1fd458e..66c3263 100755 (executable)
@@ -99,6 +99,13 @@ emcoctl_apply 03-addons-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node.  The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+
 #Install addon resources
 printf "Installing KUD addon resources\n"
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
@@ -106,6 +113,13 @@ emcoctl_apply 04-addon-resources-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Scale the kubevirt operator back up and wait for things to be ready
+# again.
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
+
 #Test addons
 printf "Testing KUD addons\n"
 pushd /opt/kud/multi-cluster/addons/tests
@@ -113,13 +127,14 @@ failed_kud_tests=""
 container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
 if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
     #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
 else
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network qat cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
 fi
 for test in ${kud_tests}; do
     KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
 done
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
 if [[ ! -z "$failed_kud_tests" ]]; then
     printf "Test cases failed:${failed_kud_tests}\n"
     exit 1
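
The workaround comments above attribute the blocked drain to a PodDisruptionBudget created by kubevirt. A minimal sanity check of that claim on the target cluster is sketched below (a sketch only: <node-name> is a placeholder, and the namespace is whatever the cluster kubeconfig defaults to):

# Sketch: confirm that a kubevirt-owned PodDisruptionBudget is what blocks
# the drain on a single-node cluster. <node-name> is a placeholder.
export KUBECONFIG=${CLUSTER_KUBECONFIG}

# List PodDisruptionBudgets in all namespaces and note ALLOWED DISRUPTIONS.
kubectl get poddisruptionbudgets --all-namespaces

# With the kubevirt operator still running, draining the only node reports
# eviction errors (or times out) while the PDB allows zero disruptions.
kubectl drain <node-name> --ignore-daemonsets --timeout=60s
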
cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
index e26a549..22ebea8 100755 (executable)
@@ -183,6 +183,13 @@ emcoctl_apply 03-addons-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node.  The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+
 #Install addon resources
 printf "Installing KUD addon resources\n"
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
@@ -190,6 +197,13 @@ emcoctl_apply 04-addon-resources-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Scale the kubevirt operator back up and wait for things to be ready
+# again.
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
+
 #Test addons
 printf "Testing KUD addons\n"
 pushd /opt/kud/multi-cluster/addons/tests
@@ -197,13 +211,14 @@ failed_kud_tests=""
 container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
 if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
     #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
 else
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network qat cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
 fi
 for test in ${kud_tests}; do
     KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
 done
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
 if [[ ! -z "$failed_kud_tests" ]]; then
     printf "Test cases failed:${failed_kud_tests}\n"
     exit 1
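
The scale-down/scale-up workaround is duplicated verbatim in both verifier scripts. A minimal sketch of how it could be factored into a shared helper is below (the function name is hypothetical and not part of this change; the kubectl invocations are the same ones added above):

# Hypothetical helper, not part of this change: run a command with the
# kubevirt operator temporarily scaled to zero so the sriov node drain
# can complete, then restore the original replica count.
run_with_kubevirt_operator_scaled_down() {
    local replicas
    replicas=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
    "$@"
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${replicas}
    wait_for_addons_ready
}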