#!/usr/bin/env bash
set -eu -o pipefail
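
# End-to-end verifier for the BPA operator VM flow: generates a Provisioning CR
# from the test VMs' provisioning-network MAC addresses, waits for the KUD
# install job, sanity-checks the new cluster's API server, installs and
# exercises the KUD addons via EMCO, then tears everything down.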

CLUSTER_NAME=cluster-test
ADDONS_NAMESPACE=kud
NUM_MASTERS=${NUM_MASTERS:-"1"}
NUM_WORKERS=${NUM_WORKERS:-"1"}

# Create provisioning CR file for testing
cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
apiVersion: bpa.akraino.org/v1alpha1
kind: Provisioning
metadata:
  name: e2e-test-provisioning
  labels:
    cluster: ${CLUSTER_NAME}
    owner: c1
spec:
  masters:
EOF
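# Each VM's MAC on the provisioning network is read from "virsh domiflist";
# the awk below picks column 5 (MAC) of the row whose source network matches
# "provisioning". The parsed output looks roughly like this (values are
# illustrative):
#   Interface  Type     Source        Model   MAC
#   ------------------------------------------------------------
#   vnet0      network  provisioning  virtio  52:54:00:aa:bb:cc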
for ((master=0;master<NUM_MASTERS;++master)); do
    mac=$(virsh domiflist "master_${master}" | awk '/provisioning/ {print $5}')
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - master-${master}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  workers:
EOF
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    mac=$(virsh domiflist "worker_${worker}" | awk '/provisioning/ {print $5}')
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - worker-${worker}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  KUDPlugins:
    - emco
EOF
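# With the default NUM_MASTERS=1 and NUM_WORKERS=1, the generated CR is shaped
# like this (MAC addresses are illustrative):
#
#   apiVersion: bpa.akraino.org/v1alpha1
#   kind: Provisioning
#   metadata:
#     name: e2e-test-provisioning
#     labels:
#       cluster: cluster-test
#       owner: c1
#   spec:
#     masters:
#       - master-0:
#           mac-address: 52:54:00:aa:bb:cc
#     workers:
#       - worker-0:
#           mac-address: 52:54:00:dd:ee:ff
#     KUDPlugins:
#       - emco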
kubectl apply -f e2etest/e2e_test_provisioning_cr.yaml
sleep 5

#Check Status of kud job pod
status="Running"

while [[ $status == "Running" ]]
do
    echo "KUD install job still running"
    sleep 2m
    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
    status=$(echo $stats | cut -d " " -f 3)
done

#Print logs of Job Pod
jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
podName=$(echo $jobPod | cut -d " " -f 1)
printf "\nNow Printing Job pod logs\n"
kubectl logs $podName

if [[ $status == "Completed" ]];
then
   printf "KUD Install Job completed\n"
   printf "Checking cluster status\n"

   source ../../env/lib/common.sh
   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
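   # call_api (sourced from env/lib/common.sh) is assumed to wrap curl with
   # retries; the check below is roughly equivalent to:
   #   curl --insecure --header "Authorization: Bearer $TOKEN" $APISERVER/api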
   if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
   then
       printf "\nKubernetes Cluster Install did not complete successfully\n"
       exit 1
   else
       printf "\nKubernetes Cluster Install was successful\n"
   fi

else
    printf "KUD Install Job failed\n"
    exit 1
fi

function emcoctl_apply {
    # Workaround known issue with emcoctl resource instantiation by retrying
    # until a 2xx is received.
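    # emcoctl.sh is expected to print "Response Code: <code>" lines; the awk
    # keeps the last one, and we retry until it falls in the 2xx range.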
    try=0
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        if [[ $try -lt 10 ]]; then
            echo "Waiting for KUD addons to instantiate"
            sleep 1s
        else
            return 1
        fi
        try=$((try + 1))
    done
    return 0
}

function emcoctl_delete {
    # Workaround known issue with emcoctl resource deletion by retrying
    # until a 404 is received.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
        sleep 1s
    done
}

function wait_for_addons_ready {
    #Wait for addons to be ready
    # The deployment intent group status reports instantiated before all
    # Pods are ready, so wait for the instance label (.spec.version) of
    # the deployment intent group instead.
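    # The instance label value is assumed to be "r1" here; the same value
    # appears in the r1-kubevirt-operator deployment name used below.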
    status="Pending"
    for try in {0..9}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
            status="Ready"
            break
        fi
    done
    [[ $status == "Ready" ]]
}

#Install addons
printf "Installing KUD addons\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 00-controllers.yaml
emcoctl_apply 01-cluster.yaml
emcoctl_apply 02-project.yaml
emcoctl_apply 03-addons-app.yaml
popd
wait_for_addons_ready

#Workaround for sriov+kubevirt issue on single-node clusters
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node.  The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
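# (If needed, the blocking PodDisruptionBudget can be inspected with e.g.:
#   KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get poddisruptionbudgets -A)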
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0

#Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 04-addon-resources-app.yaml
popd
wait_for_addons_ready

#Workaround for sriov+kubevirt issue on single-node clusters
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready

#Test addons
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
failed_kud_tests=""
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    #With containerd 1.2.13, the qat test container image fails to unpack.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ ! -z "$failed_kud_tests" ]]; then
    printf "Test cases failed:${failed_kud_tests}\n"
    exit 1
fi
popd
printf "All test cases passed\n"

#Tear down setup
printf "\n\nBeginning E2E Test Teardown\n\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_delete 04-addon-resources-app.yaml
emcoctl_delete 03-addons-app.yaml
emcoctl_delete 02-project.yaml
emcoctl_delete 01-cluster.yaml
emcoctl_delete 00-controllers.yaml
popd
kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
kubectl delete job kud-${CLUSTER_NAME}
kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
rm e2etest/e2e_test_provisioning_cr.yaml
rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
rm -rf /opt/kud/multi-cluster/addons
make delete