#!/usr/bin/env bash

CLUSTER_NAME=test-bmh-cluster
# Namespace the KUD addon workloads are deployed into (value assumed here;
# it must match the namespace used by the addon deployments below)
ADDONS_NAMESPACE=kud

kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
#Check Status of kud job pod
status="Running"

while [[ $status == "Running" ]]
do
    echo "KUD install job still running"
    sleep 2m
    stats=$(kubectl get pods | grep -i kud-${CLUSTER_NAME})
    status=$(echo $stats | cut -d " " -f 3)
done
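# Note: parsing the STATUS column out of `kubectl get pods` is fragile if
# the pod name or output format changes. A sketch of an equivalent check
# using kubectl's built-in waiting (assuming the Job object is named
# kud-${CLUSTER_NAME}) would be:
#
#   kubectl wait --for=condition=complete --timeout=120m job/kud-${CLUSTER_NAME}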
#Print logs of Job Pod
jobPod=$(kubectl get pods | grep kud-${CLUSTER_NAME})
podName=$(echo $jobPod | cut -d " " -f 1)
printf "\nNow Printing Job pod logs\n"
kubectl logs $podName
if [[ $status == "Completed" ]];
then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"

    source ../../env/lib/common.sh
    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
        printf "\nKubernetes Cluster Install did not complete successfully\n"
        exit 1
    else
        printf "\nKubernetes Cluster Install was successful\n"
    fi
else
    printf "KUD Install Job failed\n"
    exit 1
fi
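# Note: the TOKEN lookup above relies on the default ServiceAccount having
# an auto-generated token Secret. On Kubernetes v1.24+ such Secrets are no
# longer created automatically, so a short-lived token would need to be
# requested instead, e.g.:
#
#   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl create token default)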
function emcoctl_apply {
    # Workaround known issue with emcoctl resource instantiation by retrying
    # until a 2xx is received.
    try=0
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
               awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        if [[ $try -lt 10 ]]; then
            echo "Waiting for KUD addons to instantiate"
            sleep 1s
            try=$((try + 1))
        else
            return 1
        fi
    done
}
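# Note: the `=~ 2..` match above is unanchored, so a "2" followed by any two
# characters anywhere in the captured code satisfies it. A stricter pattern
# (a sketch, not taken from the original) would be:
#
#   [[ $code =~ ^2[0-9]{2}$ ]]
#
# The same consideration applies to the `=~ 404` match in emcoctl_delete below.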
function emcoctl_delete {
    # Workaround known issue with emcoctl resource deletion by retrying
    # until a 404 is received.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
               awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
        sleep 1s
    done
}
function wait_for_addons_ready {
    #Wait for addons to be ready
    # The deployment intent group status reports instantiated before all
    # Pods are ready, so wait for the instance label (.spec.version) of
    # the deployment intent group instead.
    status="Pending"
    for try in {0..19}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
            status="Ready"
            break
        fi
    done
    [[ $status == "Ready" ]]
}
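# Note: --timeout=0s makes `kubectl wait` perform a single immediate check
# rather than blocking, so the function above polls up to 20 times at 30s
# intervals (roughly a 10 minute budget) before giving up.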
#Install addons
printf "Installing KUD addons\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 00-controllers.yaml
emcoctl_apply 01-cluster.yaml
emcoctl_apply 02-project.yaml
emcoctl_apply 03-addons-app.yaml
popd
wait_for_addons_ready
#Workaround for sriov+kubevirt issue on single-node clusters
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node. The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
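# On a single-node cluster a PodDisruptionBudget can never be satisfied
# during a drain (there is no other node to reschedule onto), which is why
# eviction stalls. The blocking PDB can be inspected with (namespace
# assumed to be ${ADDONS_NAMESPACE}):
#
#   KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get poddisruptionbudgets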
#Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 04-addon-resources-app.yaml
popd
wait_for_addons_ready
#Workaround for sriov+kubevirt issue on single-node clusters
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready
#Test addons
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
failed_kud_tests=""
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    #With containerd 1.2.13, the qat test container image fails to unpack.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ -n "$failed_kud_tests" ]]; then
    printf "Test cases failed:${failed_kud_tests}\n"
    exit 1
fi
popd
printf "All test cases passed\n"
#Tear down setup
printf "\n\nBeginning BMH E2E Test Teardown\n\n"
# Workaround known issue with emcoctl resource deletion by retrying
# until a 404 is received.
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_delete 04-addon-resources-app.yaml
emcoctl_delete 03-addons-app.yaml
emcoctl_delete 02-project.yaml
emcoctl_delete 01-cluster.yaml
emcoctl_delete 00-controllers.yaml
popd
kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
kubectl delete job kud-${CLUSTER_NAME}
kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
rm -rf /opt/kud/multi-cluster/addons