Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: Ic8e75cfd6f450a38250d11e903a108133c4f65f3
set -eu -o pipefail
CLUSTER_NAME=test-bmh-cluster
kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
sleep 5
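The sleep 5 above just gives the apiserver a moment to admit the CR. A more direct way to follow provisioning would be to watch the objects themselves; this is a sketch, assuming the metal3 BareMetalHost CRD (which the "bmh" in the filename suggests) is installed:

# Sketch, not from the patch: watch the BareMetalHost objects created above.
kubectl get baremetalhosts --all-namespaces -w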
until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
if [[ $try -lt 10 ]]; then
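For readability, here is a minimal sketch of how this retry fragment plausibly fits together. Only the until condition and the $try -lt 10 guard come from the script; the counter initialization, increment, back-off interval, and failure path are assumptions:

try=0
until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
           awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
    if [[ $try -lt 10 ]]; then
        try=$((try + 1))  # assumed: count the attempt and retry
        sleep 1s          # assumed back-off interval
    else
        return 1          # assumed: give up after 10 attempts
    fi
done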
- echo "Waiting for KUD addons to terminate"
+ echo "Waiting for KUD addons to instantiate"
for try in {0..19}; do
printf "Waiting for KUD addons to be ready\n"
sleep 30s
- if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+ if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
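Piecing the nearby fragments together, the polling helper likely reads as below. The wait_for_addons_ready name does appear later in this script; the function wrapper and the return paths are assumptions, while the loop bounds and the changed kubectl wait line are from the patch:

function wait_for_addons_ready {
    for try in {0..19}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        # Succeeds only if every r1 pod in the addons namespace is already Ready.
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod \
               -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s \
               2>/dev/null >/dev/null; then
            return 0  # assumed success path
        fi
    done
    return 1  # assumed: addons never became ready
}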
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node. The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
-KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
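To see the PodDisruptionBudget the comment above refers to before applying the workaround, standard kubectl suffices (the PDB name itself is not shown in the patch):

# Sketch: list the PDBs in the cluster; one created by kubevirt with
# maxUnavailable=0 (or minAvailable equal to its replica count) is what
# blocks the sriov drain.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get poddisruptionbudgets --all-namespaces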
#Install addon resources
printf "Installing KUD addon resources\n"
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
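One defensive step not in the patch: after restoring the replica count, wait for the operator rollout to finish before re-checking the addons. A sketch using standard kubectl:

# Block until the restored deployment is fully rolled out (the timeout is an
# arbitrary choice for illustration).
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} rollout status \
    deployments/r1-kubevirt-operator --timeout=5m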
wait_for_addons_ready
#Test addons
set -eu -o pipefail
CLUSTER_NAME=cluster-test
NUM_MASTERS=${NUM_MASTERS:-"1"}
NUM_WORKERS=${NUM_WORKERS:-"1"}
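Both defaults use bash's ${VAR:-default} expansion, so the cluster topology can be overridden from the environment without editing the script. For example (the script name here is illustrative, not from the patch):

# Provision a one-master, three-worker test cluster.
NUM_MASTERS=1 NUM_WORKERS=3 ./e2e_test.sh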
until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
if [[ $try -lt 10 ]]; then
- echo "Waiting for KUD addons to terminate"
+ echo "Waiting for KUD addons to instantiate"
for try in {0..9}; do
printf "Waiting for KUD addons to be ready\n"
sleep 30s
- if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+ if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node. The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
-KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
#Install addon resources
printf "Installing KUD addon resources\n"
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
-KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready
#Test addons