CLUSTER_NAME=cluster-test
NUM_MASTERS=${NUM_MASTERS:-"1"}
NUM_WORKERS=${NUM_WORKERS:-"1"}
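
# NUM_MASTERS and NUM_WORKERS may be overridden from the environment to
# exercise multi-node clusters; the defaults provision one master and one
# worker VM.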

# Create fake DHCP lease file
cat <<EOF > /opt/icn/dhcp/dhcpd.leases
# The format of this file is documented in the dhcpd.leases(5) manual page.
# This lease file was written by isc-dhcp-4.3.5

# authoring-byte-order entry is generated, DO NOT DELETE
authoring-byte-order little-endian;

EOF
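
# One synthetic lease entry is appended per VM below. A matching row of
# `virsh net-dhcp-leases baremetal` looks roughly like (layout assumed):
#   2019-08-08 23:52:49  52:54:00:aa:bb:cc  ipv4  10.10.110.21/24  master-0  -
# so after whitespace splitting, field 3 is the MAC and field 5 is the IP.
# The lease timestamps written below are fixed placeholders; only the MAC,
# IP, and client-hostname vary per host.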
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip=${ip%%/*}  # drop the /CIDR suffix from the virsh output
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "master-${master}";
}
EOF
done

for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip=${ip%%/*}  # drop the /CIDR suffix from the virsh output
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "worker-${worker}";
}
EOF
done
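
# The Provisioning CR below is consumed by the BPA operator, which is
# expected to launch a KUD installer job named kud-${CLUSTER_NAME} (polled
# further down) for the hosts listed under masters and workers.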
# Create provisioning CR file for testing
cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
apiVersion: bpa.akraino.org/v1alpha1
kind: Provisioning
metadata:
  name: e2e-test-provisioning
  labels:
    cluster: ${CLUSTER_NAME}
    owner: c1
spec:
  masters:
EOF
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - master-${master}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  workers:
EOF
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - worker-${worker}:
        mac-address: ${mac}
EOF
done
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  KUDPlugins:
    - emco
EOF
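
# Assumption: the emco entry under KUDPlugins above is what causes the
# installer to generate the emcoctl.sh wrapper and addons artifacts that
# the rest of this script depends on.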
kubectl apply -f e2etest/e2e_test_provisioning_cr.yaml

#Check status of the KUD job pod
status="Running"
while [[ $status == "Running" ]]
do
    echo "KUD install job still running"
    sleep 2m
    # Field 3 of the default "kubectl get pods" output is the STATUS column
    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
    status=$(echo $stats | cut -d " " -f 3)
done

#Print logs of Job Pod
jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
podName=$(echo $jobPod | cut -d " " -f 1)
printf "\nNow Printing Job pod logs\n"
kubectl logs $podName
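
# If the job completed, sanity-check the provisioned cluster by calling its
# API server directly with the default service account's token. call_api is
# provided by the sourced common.sh; it is assumed here to be a curl wrapper.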
if [[ $status == "Completed" ]];
then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"

    source ../../env/lib/common.sh
    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
        printf "\nKubernetes Cluster Install did not complete successfully\n"
        exit 1
    else
        printf "\nKubernetes Cluster Install was successful\n"
    fi
else
    printf "KUD Install Job failed\n"
    exit 1
fi
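
# Helpers for driving EMCO. The emcoctl.sh wrapper is generated into the
# cluster's artifacts directory by the install; it is assumed to invoke the
# emcoctl binary and print a "Response Code: NNN" line per resource.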
function emcoctl_apply {
    # Workaround known issue with emcoctl resource instantiation by retrying
    # until a 2xx is received.
    try=0
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        if [[ $try -lt 10 ]]; then
            echo "Waiting for KUD addons to instantiate"
            sleep 1s
        else
            return 1
        fi
        try=$((try + 1))
    done
    return 0
}
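
# Both emcoctl_apply above and emcoctl_delete below parse the last
# "Response Code:" line emitted by emcoctl.sh; an empty or non-matching
# code simply triggers another retry.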
function emcoctl_delete {
    # Workaround known issue with emcoctl resource deletion by retrying
    # until a 404 is received.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
        sleep 1s
    done
}

function wait_for_addons_ready {
    #Wait for addons to be ready
    # The deployment intent group status reports instantiated before all
    # Pods are ready, so wait for the instance label (.spec.version) of
    # the deployment intent group instead.
    status="Pending"
    for try in {0..9}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
            status="Ready"
            break
        fi
    done
    [[ $status == "Ready" ]]
}
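
# The trailing [[ $status == "Ready" ]] test is the function's return value,
# so callers can tell whether the addons ever became Ready.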
177 printf "Installing KUD addons\n"
178 pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
179 emcoctl_apply 00-controllers.yaml
180 emcoctl_apply 01-cluster.yaml
181 emcoctl_apply 02-project.yaml
182 emcoctl_apply 03-addons-app.yaml
184 wait_for_addons_ready
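
# Note the apply order above: the controllers are registered first, then the
# cluster, the project, and finally the addon app intents that build on them.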

#Workaround for sriov+kubevirt issue on single-node clusters
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node. The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
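
# KUBEVIRT_OP_REPLICAS preserves the original replica count so it can be
# restored by the matching scale command after the drain (below).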

#Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 04-addon-resources-app.yaml
popd
wait_for_addons_ready

#Workaround for sriov+kubevirt issue on single-node clusters
# Scale the kubevirt operator back up and wait for things to be ready again
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready
208 printf "Testing KUD addons\n"
209 pushd /opt/kud/multi-cluster/addons/tests
failed_kud_tests=""
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    #With containerd 1.2.13, the qat test container image fails to unpack.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
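
# plugin_fw_v2.sh drives the EMCO v2 plugin flow against the external
# cluster; prepending the artifacts directory to PATH is assumed to make
# emcoctl.sh resolvable from inside the test.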
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ ! -z "$failed_kud_tests" ]]; then
    printf "Test cases failed:${failed_kud_tests}\n"
    exit 1
fi
printf "All test cases passed\n"
popd
printf "\n\nBeginning E2E Test Teardown\n\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_delete 04-addon-resources-app.yaml
emcoctl_delete 03-addons-app.yaml
emcoctl_delete 02-project.yaml
emcoctl_delete 01-cluster.yaml
emcoctl_delete 00-controllers.yaml
popd
kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
kubectl delete job kud-${CLUSTER_NAME}
kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
rm e2etest/e2e_test_provisioning_cr.yaml
rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
rm -rf /opt/kud/multi-cluster/addons
rm /opt/icn/dhcp/dhcpd.leases