From db33a9a3b7c7d2c85f45f8204fca27f767fa9350 Mon Sep 17 00:00:00 2001
From: Todd Malsbary
Date: Tue, 17 Aug 2021 10:50:54 -0700
Subject: [PATCH] Select addons namespace with kubectl in e2e tests

Signed-off-by: Todd Malsbary
Change-Id: Ic8e75cfd6f450a38250d11e903a108133c4f65f3
---
 cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh | 11 ++++++-----
 cmd/bpa-operator/e2etest/bpa_vm_verifier.sh  | 11 ++++++-----
 2 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh b/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
index 66c3263..fa1f90a 100755
--- a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
@@ -2,6 +2,7 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=test-bmh-cluster
+ADDONS_NAMESPACE=kud
 kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
 sleep 5
 
@@ -52,7 +53,7 @@ function emcoctl_apply {
 
     until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml | awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
         if [[ $try -lt 10 ]]; then
-            echo "Waiting for KUD addons to terminate"
+            echo "Waiting for KUD addons to instantiate"
             sleep 1s
         else
             return 1
@@ -81,7 +82,7 @@ function wait_for_addons_ready {
     for try in {0..19}; do
         printf "Waiting for KUD addons to be ready\n"
         sleep 30s
-        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
             status="Ready"
             break
         fi
@@ -103,8 +104,8 @@ wait_for_addons_ready
 # The issue is kubevirt creates a PodDisruptionBudget that prevents
 # sriov from succesfully draining the node. The workaround is to
 # temporarily scale down the kubevirt operator while the drain occurs.
-KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
 
 #Install addon resources
 printf "Installing KUD addon resources\n"
@@ -117,7 +118,7 @@ wait_for_addons_ready
 # Scale the kubevirt operator back up and wait things to be ready
 # again.
 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
 wait_for_addons_ready
 
 #Test addons
diff --git a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
index 22ebea8..7c8b4c4 100755
--- a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
@@ -2,6 +2,7 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=cluster-test
+ADDONS_NAMESPACE=kud
 NUM_MASTERS=${NUM_MASTERS:-"1"}
 NUM_WORKERS=${NUM_WORKERS:-"1"}
 
@@ -136,7 +137,7 @@ function emcoctl_apply {
 
     until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml | awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
         if [[ $try -lt 10 ]]; then
-            echo "Waiting for KUD addons to terminate"
+            echo "Waiting for KUD addons to instantiate"
             sleep 1s
         else
             return 1
@@ -165,7 +166,7 @@ function wait_for_addons_ready {
     for try in {0..9}; do
         printf "Waiting for KUD addons to be ready\n"
         sleep 30s
-        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
             status="Ready"
             break
         fi
@@ -187,8 +188,8 @@ wait_for_addons_ready
 # The issue is kubevirt creates a PodDisruptionBudget that prevents
 # sriov from succesfully draining the node. The workaround is to
 # temporarily scale down the kubevirt operator while the drain occurs.
-KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
 
 #Install addon resources
 printf "Installing KUD addon resources\n"
@@ -201,7 +202,7 @@ wait_for_addons_ready
 # Scale the kubevirt operator back up and wait things to be ready
 # again.
 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
 wait_for_addons_ready
 
 #Test addons
-- 
2.16.6
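Aside: the readiness check both scripts now share can be exercised on its
own. Below is a minimal sketch assembled from the hunks above; the "kud"
namespace and the app.kubernetes.io/instance=r1 label come straight from
the patch, while the requirement that CLUSTER_KUBECONFIG already point at
the provisioned cluster's kubeconfig is an assumption carried over from
the surrounding verifier scripts (the 10-try/30s loop mirrors
bpa_vm_verifier.sh).

#!/usr/bin/env bash
set -eu -o pipefail

# From the patch: addons are installed into the "kud" namespace and carry
# the Helm instance label "r1".
ADDONS_NAMESPACE=kud
# Assumption: the caller provides the target cluster's kubeconfig.
CLUSTER_KUBECONFIG=${CLUSTER_KUBECONFIG:?set to the target cluster kubeconfig}

function wait_for_addons_ready {
    # --timeout=0s makes kubectl wait a non-blocking probe. It also exits
    # non-zero when no pods match the selector, so an empty namespace
    # counts as "not ready" rather than vacuously ready.
    status="Failed"
    for try in {0..9}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
               wait pod -l app.kubernetes.io/instance=r1 \
               --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
            status="Ready"
            break
        fi
    done
    [[ ${status} == "Ready" ]]
}

wait_for_addons_ready

Scoping the wait to ${ADDONS_NAMESPACE} instead of --all-namespaces keeps
r1-labeled pods in unrelated namespaces from satisfying (or blocking) the
check, which is the point of the change.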
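A second note on the kubevirt hunks: both scripts bracket addon
installation with a scale-down/scale-up of deployments/r1-kubevirt-operator
because, per the comment in the patch, kubevirt's PodDisruptionBudget keeps
sriov from draining the node. The sequence below is pulled out of the hunks
for readability; the emcoctl install step between the two halves is elided.

# Remember the configured replica count so the restore below works
# regardless of what the deployment was scaled to.
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
    get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
# Park the operator at 0 replicas for the duration of the drain.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
    scale deployments/r1-kubevirt-operator --replicas=0

# ... addon resources are installed and the drain occurs here ...

# Wait for every node to report Ready, then restore the operator.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} \
    scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}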