set -eu -o pipefail
CLUSTER_NAME=test-bmh-cluster
+ADDONS_NAMESPACE=kud
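+#Create the BareMetalHost (BMH) provisioning CR to begin deploying the cluster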
kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
sleep 5
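+#Apply the EMCO resources, retrying until the last response code reported
+# by emcoctl is 2xx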
until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f "$@" -v values.yaml |
               awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
    if [[ $try -lt 10 ]]; then
-        echo "Waiting for KUD addons to terminate"
+        echo "Waiting for KUD addons to instantiate"
        sleep 1s
    else
        return 1
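+#Poll the addon pods for up to 10 minutes (20 tries, 30s apart) until all
+# report Ready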
for try in {0..19}; do
    printf "Waiting for KUD addons to be ready\n"
    sleep 30s
-    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
        status="Ready"
        break
    fi
popd
wait_for_addons_ready
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node. The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
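+# (If the drain appears stuck, the blocking PodDisruptionBudget can be
+# inspected with "kubectl get pdb --all-namespaces".)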
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
+
#Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
popd
wait_for_addons_ready
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Scale the kubevirt operator back up and wait for things to be ready
+# again.
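+# Waiting for the nodes to be Ready first ensures the operator is not
+# restored while the drain is still in progress.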
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
+
#Test addons
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
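+#Select the addon test list based on the node's container runtime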
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
#With containerd 1.2.13, the qat test container image fails to unpack.
- kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network cmk"
+ kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
- kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network qat cmk"
+ kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
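+#Run the plugin_fw_v2 test with the cluster artifacts directory (which
+# contains emcoctl.sh) on the PATH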
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ -n "$failed_kud_tests" ]]; then
printf "Test cases failed:${failed_kud_tests}\n"
exit 1