Select addons namespace with kubectl in e2e tests
[icn.git] / cmd / bpa-operator / e2etest / bpa_bmh_verifier.sh
1 #!/usr/bin/env bash
2 set -eu -o pipefail
3
4 CLUSTER_NAME=test-bmh-cluster
5 ADDONS_NAMESPACE=kud
6
7 kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
8 sleep 5
9
10 #Check Status of kud job pod
11 status="Running"
12
13 while [[ $status == "Running" ]]
14 do
15     echo "KUD install job still running"
16     sleep 2m
17     stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
18     status=$(echo $stats | cut -d " " -f 3)
19 done
20
21 #Print logs of Job Pod
22 jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
23 podName=$(echo $jobPod | cut -d " " -f 1)
24 printf "\nNow Printing Job pod logs\n"
25 kubectl logs $podName
26
27 if [[ $status == "Completed" ]];
28 then
29    printf "KUD Install Job completed\n"
30    printf "Checking cluster status\n"
31
32    source ../../env/lib/common.sh
33    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
34    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
35    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
36    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
37    then
38        printf "\nKubernetes Cluster Install did not complete successfully\n"
39        exit 1
40    else
41        printf "\nKubernetes Cluster Install was successful\n"
42    fi
43
44 else
45     printf "KUD Install Job failed\n"
46     exit 1
47 fi
48
49 function emcoctl_apply {
50     # Workaround known issue with emcoctl resource instantation by retrying
51     # until a 2xx is received.
52     try=0
53     until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml  |
54                    awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
55         if [[ $try -lt 10 ]]; then
56             echo "Waiting for KUD addons to instantiate"
57             sleep 1s
58         else
59             return 1
60         fi
61         try=$((try + 1))
62     done
63     return 0
64 }
65
66 function emcoctl_delete {
67     # Workaround known issue with emcoctl resource deletion by retrying
68     # until a 404 is received.
69     until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
70                    awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
71         echo "Waiting for KUD addons to terminate"
72         sleep 1s
73     done
74 }
75
76 function wait_for_addons_ready {
77     #Wait for addons to be ready
78     # The deployment intent group status reports instantiated before all
79     # Pods are ready, so wait for the instance label (.spec.version) of
80     # the deployment intent group instead.
81     status="Pending"
82     for try in {0..19}; do
83         printf "Waiting for KUD addons to be ready\n"
84         sleep 30s
85         if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
86             status="Ready"
87             break
88         fi
89     done
90     [[ $status == "Ready" ]]
91 }
92
93 #Install addons
94 printf "Installing KUD addons\n"
95 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
96 emcoctl_apply 00-controllers.yaml
97 emcoctl_apply 01-cluster.yaml
98 emcoctl_apply 02-project.yaml
99 emcoctl_apply 03-addons-app.yaml
100 popd
101 wait_for_addons_ready
102
103 #Workaround for sriov+kubevirt issue on single-node clusters
104 # The issue is kubevirt creates a PodDisruptionBudget that prevents
105 # sriov from succesfully draining the node.  The workaround is to
106 # temporarily scale down the kubevirt operator while the drain occurs.
107 KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
108 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
109
110 #Install addon resources
111 printf "Installing KUD addon resources\n"
112 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
113 emcoctl_apply 04-addon-resources-app.yaml
114 popd
115 wait_for_addons_ready
116
117 #Workaround for sriov+kubevirt issue on single-node clusters
118 # Scale the kubevirt operator back up and wait things to be ready
119 # again.
120 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
121 KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
122 wait_for_addons_ready
123
124 #Test addons
125 printf "Testing KUD addons\n"
126 pushd /opt/kud/multi-cluster/addons/tests
127 failed_kud_tests=""
128 container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
129 if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
130     #With containerd 1.2.13, the qat test container image fails to unpack.
131     kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
132 else
133     kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
134 fi
135 for test in ${kud_tests}; do
136     KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
137 done
138 KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
139 if [[ ! -z "$failed_kud_tests" ]]; then
140     printf "Test cases failed:${failed_kud_tests}\n"
141     exit 1
142 fi
143 popd
144 printf "All test cases passed\n"
145
146 #Tear down setup
147 printf "\n\nBeginning BMH E2E Test Teardown\n\n"
148 # Workaround known issue with emcoctl resource deletion by retrying
149 # until a 404 is received.
150 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
151 emcoctl_delete 04-addon-resources-app.yaml
152 emcoctl_delete 03-addons-app.yaml
153 emcoctl_delete 02-project.yaml
154 emcoctl_delete 01-cluster.yaml
155 emcoctl_delete 00-controllers.yaml
156 popd
157 kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
158 kubectl delete job kud-${CLUSTER_NAME}
159 kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
160 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
161 rm -rf /opt/kud/multi-cluster/addons
162 make delete