#!/usr/bin/env bash
set -eu -o pipefail

CLUSTER_NAME=cluster-test
NUM_MASTERS=${NUM_MASTERS:-"1"}
NUM_WORKERS=${NUM_WORKERS:-"1"}

# Create fake DHCP lease file
mkdir -p /opt/icn/dhcp
cat <<EOF > /opt/icn/dhcp/dhcpd.leases
# The format of this file is documented in the dhcpd.leases(5) manual page.
# This lease file was written by isc-dhcp-4.3.5

# authoring-byte-order entry is generated, DO NOT DELETE
authoring-byte-order little-endian;

EOF
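
# Note on the parsing below: `virsh net-dhcp-leases` prints one row per
# lease; an illustrative row (values are made up) looks like:
#   2019-08-08 22:32:49  52:54:00:aa:bb:cc  ipv4  192.168.121.10/24  master-0  -
# Word-splitting the unquoted $lease collapses the column padding, so
# `cut -d " "` sees the MAC address in field 3 and the CIDR-suffixed IP in
# field 5; ${ip%%/*} then strips the prefix length from the IP.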
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal | grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip="${ip%%/*}"
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "master-${master}";
}
EOF
done
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal | grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip="${ip%%/*}"
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "worker-${worker}";
}
EOF
done

# Create provisioning CR file for testing
cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
apiVersion: bpa.akraino.org/v1alpha1
kind: Provisioning
metadata:
  name: e2e-test-provisioning
  labels:
    cluster: ${CLUSTER_NAME}
    owner: c1
spec:
  masters:
EOF
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal | grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - master-${master}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  workers:
EOF
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal | grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - worker-${worker}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  KUDPlugins:
    - emco
EOF
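
# With the default NUM_MASTERS=1 and NUM_WORKERS=1, the rendered CR looks
# roughly like this (MAC addresses are illustrative):
#
#   apiVersion: bpa.akraino.org/v1alpha1
#   kind: Provisioning
#   metadata:
#     name: e2e-test-provisioning
#     labels:
#       cluster: cluster-test
#       owner: c1
#   spec:
#     masters:
#       - master-0:
#           mac-address: 52:54:00:aa:bb:cc
#     workers:
#       - worker-0:
#           mac-address: 52:54:00:dd:ee:ff
#     KUDPlugins:
#       - emco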
kubectl apply -f e2etest/e2e_test_provisioning_cr.yaml
sleep 5

# Check status of the kud job pod
status="Running"

while [[ $status == "Running" ]]
do
    echo "KUD install job still running"
    sleep 2m
    # STATUS is the third whitespace-separated column of `kubectl get pods`
    stats=$(kubectl get pods | grep -i kud-${CLUSTER_NAME})
    status=$(echo $stats | cut -d " " -f 3)
done

# Print logs of the job pod
jobPod=$(kubectl get pods | grep kud-${CLUSTER_NAME})
podName=$(echo $jobPod | cut -d " " -f 1)
printf "\nNow printing job pod logs\n"
kubectl logs $podName

if [[ $status == "Completed" ]];
then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"

    source ../../env/lib/common.sh
    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
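
    # call_api is provided by env/lib/common.sh; it is assumed here to be a
    # thin curl wrapper, so the equivalent manual smoke test would be roughly:
    #   curl --insecure --header "Authorization: Bearer $TOKEN" $APISERVER/api
    # which should return the apiserver's APIVersions object when the cluster
    # came up healthy.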
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
        printf "\nKubernetes Cluster Install did not complete successfully\n"
        exit 1
    else
        printf "\nKubernetes Cluster Install was successful\n"
    fi

else
    printf "KUD Install Job failed\n"
    exit 1
fi

function emcoctl_apply {
    # Workaround known issue with emcoctl resource instantiation by retrying
    # until a 2xx is received.
    try=0
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f "$@" -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        if [[ $try -lt 10 ]]; then
            echo "Retrying emcoctl apply"
            sleep 1s
        else
            return 1
        fi
        try=$((try + 1))
    done
    return 0
}
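
# Each emcoctl.sh invocation prints the HTTP status of every request it makes
# on lines of the form (illustrative):
#   Response Code: 201 ...
# The awk filter in the helpers above and below keeps only the last such code,
# so the retry loops key off whether the final request of the batch succeeded.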

function emcoctl_delete {
    # Workaround known issue with emcoctl resource deletion by retrying
    # until a 404 is received.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f "$@" -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
        sleep 1s
    done
}

function wait_for_addons_ready {
    # Wait for addons to be ready.
    # The deployment intent group status reports instantiated before all
    # Pods are ready, so wait for the instance label (.spec.version) of
    # the deployment intent group instead.
    status="Pending"
    for try in {0..9}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s >/dev/null 2>&1; then
            status="Ready"
            break
        fi
    done
    [[ $status == "Ready" ]]
}

# Install addons
printf "Installing KUD addons\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 00-controllers.yaml
emcoctl_apply 01-cluster.yaml
emcoctl_apply 02-project.yaml
emcoctl_apply 03-addons-app.yaml
popd
wait_for_addons_ready

# Workaround for sriov+kubevirt issue on single-node clusters.
# The issue is that kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node.  The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0

# Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 04-addon-resources-app.yaml
popd
wait_for_addons_ready

# Workaround for sriov+kubevirt issue on single-node clusters.
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready

# Test addons
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
failed_kud_tests=""
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    # With containerd 1.2.13, the qat test container image fails to unpack,
    # so skip the qat test.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ -n "$failed_kud_tests" ]]; then
    printf "Test cases failed:%s\n" "${failed_kud_tests}"
    exit 1
fi
popd
printf "All test cases passed\n"

# Tear down setup
printf "\n\nBeginning E2E Test Teardown\n\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_delete 04-addon-resources-app.yaml
emcoctl_delete 03-addons-app.yaml
emcoctl_delete 02-project.yaml
emcoctl_delete 01-cluster.yaml
emcoctl_delete 00-controllers.yaml
popd
kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
kubectl delete job kud-${CLUSTER_NAME}
kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
rm e2etest/e2e_test_provisioning_cr.yaml
rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
rm -rf /opt/kud/multi-cluster/addons
rm /opt/icn/dhcp/dhcpd.leases
make delete