-#Workaround for sriov+kubevirt issue on single-node clusters
-# The issue is kubevirt creates a PodDisruptionBudget that prevents
-# sriov from succesfully draining the node. The workaround is to
-# temporarily scale down the kubevirt operator while the drain occurs.
-KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
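+# Predicates reporting whether each add-on has finished instantiating or
+# terminating; each returns success (0) once the desired state is reached.
+
+# True once no r1 add-on pods remain in ${ADDONS_NAMESPACE}.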
+function addons_terminated {
+ [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get pod -l app.kubernetes.io/instance=r1 --no-headers 2>/dev/null | wc -l) == 0 ]]
+}
+
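+# True once every SriovNetworkNodeState reports syncStatus "Succeeded",
+# i.e. the SR-IOV operator has finished (re)configuring all nodes.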
+function networks_instantiated {
+ local -r count=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate --no-headers 2>/dev/null | wc -l)
+ local -r succeeded=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate -o jsonpath='{range .items[*]}{.status.syncStatus}{"\n"}{end}' 2>/dev/null | grep "Succeeded" | wc -l)
+ [[ $count == $succeeded ]]
+}
+
+function networks_terminated {
+ # The syncStatus will be the same whether we are instantiating or terminating an SR-IOV network
+ networks_instantiated
+}
+
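+# True once both the KubeVirt and CDI custom resources report phase "Deployed".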
+function kubevirt_instantiated {
+    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]] &&
+        [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]]
+}
+
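+# True once the KubeVirt and CDI custom resources are fully deleted.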
+function kubevirt_terminated {
+    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt --no-headers 2>/dev/null | wc -l) == 0 ]] &&
+        [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi --no-headers 2>/dev/null | wc -l) == 0 ]]
+}
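+
+# A minimal sketch of how these predicates could be consumed, assuming a
+# hypothetical wait_for helper (not defined in this change):
+#
+#   function wait_for {
+#       local -r predicate=$1 timeout=${2:-300} interval=5
+#       local elapsed=0
+#       # Re-run the predicate until it succeeds or the timeout elapses.
+#       until "${predicate}"; do
+#           (( elapsed >= timeout )) && return 1
+#           sleep "${interval}"
+#           (( elapsed += interval ))
+#       done
+#   }
+#
+#   wait_for kubevirt_instantiated 600   # e.g. block until KubeVirt is Deployed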