Select addons namespace with kubectl in e2e tests
[icn.git] cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
index 70af103..fa1f90a 100755
@@ -2,6 +2,7 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=test-bmh-cluster
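+# Namespace on the provisioned cluster where the KUD addons are deployed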
+ADDONS_NAMESPACE=kud
 
 kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
 sleep 5
@@ -45,45 +46,80 @@ else
     exit 1
 fi
 
+function emcoctl_apply {
+    # Work around a known issue with emcoctl resource instantiation by
+    # retrying until a 2xx response code is received.
+    try=0
+    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f "$@" -v values.yaml |
+                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
+        if [[ $try -lt 10 ]]; then
+            echo "Waiting for KUD addons to instantiate"
+            sleep 1s
+        else
+            return 1
+        fi
+        try=$((try + 1))
+    done
+    return 0
+}
+
+function emcoctl_delete {
+    # Work around a known issue with emcoctl resource deletion by
+    # retrying until a 404 response code is received.
+    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f "$@" -v values.yaml |
+                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+        echo "Waiting for KUD addons to terminate"
+        sleep 1s
+    done
+}
+
+function wait_for_addons_ready {
+    # The deployment intent group status reports instantiated before all
+    # Pods are ready, so instead wait for the Pods carrying the instance
+    # label (the deployment intent group's .spec.version) to be Ready.
+    status="Pending"
+    for try in {0..19}; do
+        printf "Waiting for KUD addons to be ready\n"
+        sleep 30s
+        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
+            status="Ready"
+            break
+        fi
+    done
+    [[ $status == "Ready" ]]
+}
+
 #Install addons
 printf "Installing KUD addons\n"
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml
-/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml
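+# Apply the EMCO prerequisites (controllers, cluster, and project), then
+# instantiate the addons composite app.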
+emcoctl_apply 00-controllers.yaml
+emcoctl_apply 01-cluster.yaml
+emcoctl_apply 02-project.yaml
+emcoctl_apply 03-addons-app.yaml
 popd
+wait_for_addons_ready
 
-#Wait for addons to be ready
-# The deployment intent group status reports instantiated before all
-# Pods are ready, so wait for the instance label (.spec.version) of
-# the deployment intent group instead.
-status="Pending"
-for try in {0..19}; do
-    printf "Waiting for KUD addons to be ready\n"
-    sleep 30s
-    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
-        status="Ready"
-        break
-    fi
-done
-[[ $status == "Ready" ]]
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node.  The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
 
 #Install addon resources
 printf "Installing KUD addon resources\n"
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml
+emcoctl_apply 04-addon-resources-app.yaml
 popd
+wait_for_addons_ready
 
-#Wait for addon resources to be ready
-status="Pending"
-for try in {0..9}; do
-    printf "Waiting for KUD addon resources to be ready\n"
-    sleep 30s
-    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
-       status="Ready"
-       break
-    fi
-done
-[[ $status == "Ready" ]]
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Wait for the node to become Ready again after the drain, then scale
+# the kubevirt operator back up and wait for the addons to be ready.
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
 
 #Test addons
 printf "Testing KUD addons\n"
@@ -92,13 +128,14 @@ failed_kud_tests=""
 container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
 if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
     #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
 else
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network qat cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
 fi
 for test in ${kud_tests}; do
     KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
 done
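+# Run the EMCO v2 plugin firewall test with the cluster artifacts
+# (emcoctl) on PATH and DEMO_FOLDER set to the current directory.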
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
 if [[ ! -z "$failed_kud_tests" ]]; then
     printf "Test cases failed:${failed_kud_tests}\n"
     exit 1
@@ -111,21 +148,11 @@ printf "\n\nBeginning BMH E2E Test Teardown\n\n"
 # Workaround known issue with emcoctl resource deletion by retrying
 # until a 404 is received.
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml |
-            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
-    echo "Waiting for KUD addon resources to terminate"
-    sleep 1s
-done
-until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml |
-            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
-    echo "Waiting for KUD addons to terminate"
-    sleep 1s
-done
-until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml |
-            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
-    echo "Waiting for KUD addons to terminate"
-    sleep 1s
-done
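+# Delete the addon resources in the reverse order of installation.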
+emcoctl_delete 04-addon-resources-app.yaml
+emcoctl_delete 03-addons-app.yaml
+emcoctl_delete 02-project.yaml
+emcoctl_delete 01-cluster.yaml
+emcoctl_delete 00-controllers.yaml
 popd
 kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
 kubectl delete job kud-${CLUSTER_NAME}