4 CLUSTER_NAME=test-bmh-cluster
6 kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
9 #Check Status of kud job pod
12 while [[ $status == "Running" ]]
14 echo "KUD install job still running"
16 stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
17 status=$(echo $stats | cut -d " " -f 3)
20 #Print logs of Job Pod
21 jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
22 podName=$(echo $jobPod | cut -d " " -f 1)
23 printf "\nNow Printing Job pod logs\n"
26 if [[ $status == "Completed" ]];
28 printf "KUD Install Job completed\n"
29 printf "Checking cluster status\n"
31 source ../../env/lib/common.sh
32 CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
33 APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
34 TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
35 if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
37 printf "\nKubernetes Cluster Install did not complete successfully\n"
40 printf "\nKubernetes Cluster Install was successful\n"
44 printf "KUD Install Job failed\n"
#######################################
# Apply an EMCO resource file, retrying until the instantiation succeeds.
# Arguments: $@ - emcoctl resource file(s) to apply (e.g. 00-controllers.yaml)
# Reads:     CLUSTER_NAME (locates emcoctl.sh); values.yaml in the CWD
# Returns:   loops until emcoctl reports a 2xx HTTP response code
#######################################
function emcoctl_apply {
# Workaround known issue with emcoctl resource instantiation by retrying
# until a 2xx is received.
    # Parse the last "Response Code: NNN" from emcoctl output; `2..` is a
    # bash regex matching any 2xx status.
    # NOTE(review): the loop body's sleep/retry-increment, `done`, and the
    # function's closing `}` are not visible in this chunk -- presumably
    # elided; $try is expected to be initialized before this loop. Confirm
    # against the full script.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        # Give up retrying after 10 attempts.
        if [[ $try -lt 10 ]]; then
            # NOTE(review): message says "terminate" but this is the apply
            # path -- likely copy/paste from emcoctl_delete.
            echo "Waiting for KUD addons to terminate"
#######################################
# Delete an EMCO resource file, retrying until the resources are gone.
# Arguments: $@ - emcoctl resource file(s) to delete
# Reads:     CLUSTER_NAME (locates emcoctl.sh); values.yaml in the CWD
# Returns:   loops until emcoctl reports a 404 (resource no longer exists)
#######################################
function emcoctl_delete {
# Workaround known issue with emcoctl resource deletion by retrying
# until a 404 is received.
    # Parse the last "Response Code: NNN" from emcoctl output; 404 means
    # the resource is fully deleted.
    # NOTE(review): the loop body's sleep, `done`, and the function's
    # closing `}` are not visible in this chunk -- presumably elided;
    # confirm against the full script.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
#######################################
# Block until all addon Pods of instance "r1" are Ready (or retries expire).
# Reads:     CLUSTER_KUBECONFIG (workload cluster kubeconfig)
# Returns:   sets $status to "Ready" on success (per the check below)
#######################################
function wait_for_addons_ready {
#Wait for addons to be ready
# The deployment intent group status reports instantiated before all
# Pods are ready, so wait for the instance label (.spec.version) of
# the deployment intent group instead.
    # Up to 20 polls; the sleep between iterations, the else/fi, `done`,
    # and the closing `}` are not visible in this chunk -- presumably
    # elided; confirm against the full script.
    for try in {0..19}; do
        printf "Waiting for KUD addons to be ready\n"
        # --timeout=0s makes `kubectl wait` a non-blocking point-in-time
        # check; output is discarded, only the exit status matters.
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
    # Final assertion: fail the function (non-zero) unless the loop above
    # set $status to "Ready".
    [[ $status == "Ready" ]]
93 printf "Installing KUD addons\n"
94 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
95 emcoctl_apply 00-controllers.yaml
96 emcoctl_apply 01-cluster.yaml
97 emcoctl_apply 02-project.yaml
98 emcoctl_apply 03-addons-app.yaml
100 wait_for_addons_ready
102 #Workaround for sriov+kubevirt issue on single-node clusters
103 # The issue is kubevirt creates a PodDisruptionBudget that prevents
104 # sriov from succesfully draining the node. The workaround is to
105 # temporarily scale down the kubevirt operator while the drain occurs.
106 KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
107 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
109 #Install addon resources
110 printf "Installing KUD addon resources\n"
111 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
112 emcoctl_apply 04-addon-resources-app.yaml
114 wait_for_addons_ready
116 #Workaround for sriov+kubevirt issue on single-node clusters
117 # Scale the kubevirt operator back up and wait things to be ready
119 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
120 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
121 wait_for_addons_ready
# --- Run the KUD addon test suite -------------------------------------
# Each <test>.sh is run against the workload cluster; failures are
# accumulated in $failed_kud_tests rather than aborting immediately.
# NOTE(review): the then/else/fi of both conditionals, the for-loop's
# do/done, any exit on failure, and the matching popd are not visible in
# this chunk -- presumably elided; confirm against the full script.
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
# Detect the node's container runtime to decide whether the qat test can run.
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    #With containerd 1.2.13, the qat test container image fails to unpack.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
for test in ${kud_tests}; do
    # A non-zero exit appends the test name to the failure list.
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
# plugin_fw_v2 additionally needs the demo folder and the cluster's
# artifacts (emcoctl) on PATH; --external targets the workload cluster.
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ ! -z "$failed_kud_tests" ]]; then
    printf "Test cases failed:${failed_kud_tests}\n"
printf "All test cases passed\n"
146 printf "\n\nBeginning BMH E2E Test Teardown\n\n"
147 # Workaround known issue with emcoctl resource deletion by retrying
148 # until a 404 is received.
149 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
150 emcoctl_delete 04-addon-resources-app.yaml
151 emcoctl_delete 03-addons-app.yaml
152 emcoctl_delete 02-project.yaml
153 emcoctl_delete 01-cluster.yaml
154 emcoctl_delete 00-controllers.yaml
156 kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
157 kubectl delete job kud-${CLUSTER_NAME}
158 kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
159 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
160 rm -rf /opt/kud/multi-cluster/addons