X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;ds=sidebyside;f=cmd%2Fbpa-operator%2Fe2etest%2Fbpa_vm_verifier.sh;h=7c8b4c4a63635649d2357984e4843f5fdee8474d;hb=db33a9a3b7c7d2c85f45f8204fca27f767fa9350;hp=e26a549fe306ec2ee79432ca67249670ad5cd8c5;hpb=280ab1f92c7bac3d053a46b4ffe3174b75185466;p=icn.git

diff --git a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
index e26a549..7c8b4c4 100755
--- a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
@@ -2,6 +2,7 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=cluster-test
+ADDONS_NAMESPACE=kud
 NUM_MASTERS=${NUM_MASTERS:-"1"}
 NUM_WORKERS=${NUM_WORKERS:-"1"}
 
@@ -136,7 +137,7 @@ function emcoctl_apply {
     until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
                    awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
         if [[ $try -lt 10 ]]; then
-            echo "Waiting for KUD addons to terminate"
+            echo "Waiting for KUD addons to instantiate"
             sleep 1s
         else
             return 1
@@ -165,7 +166,7 @@ function wait_for_addons_ready {
     for try in {0..9}; do
         printf "Waiting for KUD addons to be ready\n"
         sleep 30s
-        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
             status="Ready"
             break
         fi
@@ -183,6 +184,13 @@ emcoctl_apply 03-addons-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node. The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
+
 #Install addon resources
 printf "Installing KUD addon resources\n"
 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
@@ -190,6 +198,13 @@ emcoctl_apply 04-addon-resources-app.yaml
 popd
 wait_for_addons_ready
 
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Scale the kubevirt operator back up and wait for things to be ready
+# again.
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
+
 #Test addons
 printf "Testing KUD addons\n"
 pushd /opt/kud/multi-cluster/addons/tests
@@ -197,13 +212,14 @@ failed_kud_tests=""
 container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
 if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
     #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
 else
-    kud_tests="topology-manager-sriov multus ovn4nfv nfd sriov-network qat cmk"
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
 fi
 for test in ${kud_tests}; do
     KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
 done
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
 if [[ ! -z "$failed_kud_tests" ]]; then
     printf "Test cases failed:${failed_kud_tests}\n"
     exit 1
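
Note on the kubevirt workaround in the patch: the operator is scaled to zero before the addon resources (including sriov) are instantiated, and restored to its saved replica count once the nodes report Ready again. Below is a minimal standalone sketch of that save/scale-down/restore pattern; it is illustrative only and not part of the patch. The helper names pause_kubevirt_operator and resume_kubevirt_operator do not exist in the script, and CLUSTER_KUBECONFIG/ADDONS_NAMESPACE are assumed to be set exactly as bpa_vm_verifier.sh sets them.

#!/usr/bin/env bash
# Illustrative sketch only; not part of the patch above.
set -eu -o pipefail

# Assumptions: caller provides the kubeconfig path and addons namespace,
# as bpa_vm_verifier.sh does.
: "${CLUSTER_KUBECONFIG:?path to the test cluster kubeconfig}"
: "${ADDONS_NAMESPACE:=kud}"

# Hypothetical helper: remember the operator's replica count, then scale it
# to zero so its PodDisruptionBudget cannot block the sriov node drain.
pause_kubevirt_operator() {
    KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
        get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
        scale deployments/r1-kubevirt-operator --replicas=0
}

# Hypothetical helper: wait for the nodes to report Ready, then restore the
# operator to its saved replica count.
resume_kubevirt_operator() {
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
        scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
}

Saving the replica count rather than hard-coding 1 keeps the restore correct even if the kubevirt chart's default replica count changes.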