#!/usr/bin/env bash

CLUSTER_NAME=cluster-test
ADDONS_NAMESPACE=${ADDONS_NAMESPACE:-"kud"} # namespace the KUD addon pods land in (assumed default)
NUM_MASTERS=${NUM_MASTERS:-"1"}
NUM_WORKERS=${NUM_WORKERS:-"1"}
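# NUM_MASTERS/NUM_WORKERS select how many master-N/worker-N VMs are expected on
# the libvirt "baremetal" network; their DHCP leases are read with virsh below.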
# Create Fake DHCP File
mkdir -p /opt/icn/dhcp
cat <<EOF > /opt/icn/dhcp/dhcpd.leases
# The format of this file is documented in the dhcpd.leases(5) manual page.
# This lease file was written by isc-dhcp-4.3.5

# authoring-byte-order entry is generated, DO NOT DELETE
authoring-byte-order little-endian;

EOF
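# Append one fake lease entry per VM, copying the MAC and IP the libvirt DHCP
# server actually assigned, so the lease file matches the running test VMs.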
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip="${ip%%/*}" # drop any /prefix so the lease entry holds a bare IP (assumes virsh reports CIDR form)
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "master-${master}";
}
EOF
done
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    ip=$(echo $lease | cut -d " " -f 5)
    ip="${ip%%/*}" # drop any /prefix so the lease entry holds a bare IP (assumes virsh reports CIDR form)
    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
lease ${ip} {
  starts 4 2019/08/08 22:32:49;
  ends 4 2019/08/08 23:52:49;
  cltt 4 2019/08/08 22:32:49;
  binding state active;
  next binding state free;
  rewind binding state free;
  hardware ethernet ${mac};
  client-hostname "worker-${worker}";
}
EOF
done
# Create provisioning CR file for testing
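# The fields below follow the bpa-operator Provisioning CRD layout (masters/
# workers lists keyed by hostname, each with a mac-address field); adjust the
# keys if the CRD version in use differs.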
cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
apiVersion: bpa.akraino.org/v1alpha1
kind: Provisioning
metadata:
  name: e2e-test-provisioning
  labels:
    cluster: ${CLUSTER_NAME}
spec:
  masters:
EOF
for ((master=0;master<NUM_MASTERS;++master)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - master-${master}:
        mac-address: ${mac}
EOF
done

cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  workers:
EOF
for ((worker=0;worker<NUM_WORKERS;++worker)); do
    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
    mac=$(echo $lease | cut -d " " -f 3)
    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
    - worker-${worker}:
        mac-address: ${mac}
EOF
done
# Close out the CR spec; the emco KUD plugin is assumed here so that the EMCO
# addon steps further below have something to talk to.
cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
  KUDPlugins:
    - emco
EOF
kubectl apply -f e2etest/e2e_test_provisioning_cr.yaml
sleep 5 # give the BPA operator a moment to create the kud install job

#Check Status of kud job pod
status="Running"
while [[ $status == "Running" ]]
do
    echo "KUD install job still running"
    sleep 2m # poll interval (assumed)
    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
    status=$(echo $stats | cut -d " " -f 3)
done
#Print logs of Job Pod
jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
podName=$(echo $jobPod | cut -d " " -f 1)
printf "\nNow Printing Job pod logs\n"
kubectl logs $podName
if [[ $status == "Completed" ]];
then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"

    source ../../env/lib/common.sh
    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
        printf "\nKubernetes Cluster Install did not complete successfully\n"
    else
        printf "\nKubernetes Cluster Install was successful\n"
    fi
else
    printf "KUD Install Job failed\n"
fi
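# Helper functions for the EMCO-based addon install/teardown steps. They parse
# the "Response Code:" lines printed by emcoctl and retry until the expected
# code (2xx on apply, 404 on delete) is seen, per the workarounds noted below.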
function emcoctl_apply {
    # Workaround known issue with emcoctl resource instantiation by retrying
    # until a 2xx is received.
    try=0
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
        if [[ $try -lt 10 ]]; then
            echo "Waiting for KUD addons to instantiate"
            sleep 1s
        else
            return 1
        fi
        try=$((try + 1))
    done
    return 0
}
function emcoctl_delete {
    # Workaround known issue with emcoctl resource deletion by retrying
    # until a 404 is received.
    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
        echo "Waiting for KUD addons to terminate"
        sleep 1s
    done
}
function wait_for_addons_ready {
    #Wait for addons to be ready
    # The deployment intent group status reports instantiated before all
    # Pods are ready, so wait for the instance label (.spec.version) of
    # the deployment intent group instead.
    status="Pending"
    for try in {0..9}; do
        printf "Waiting for KUD addons to be ready\n"
        sleep 30s # poll interval (assumed)
        if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 2>/dev/null >/dev/null; then
            status="Ready"
            break
        fi
    done
    [[ $status == "Ready" ]]
}
#Install addons
printf "Installing KUD addons\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 00-controllers.yaml
emcoctl_apply 01-cluster.yaml
emcoctl_apply 02-project.yaml
emcoctl_apply 03-addons-app.yaml
popd
wait_for_addons_ready
#Workaround for sriov+kubevirt issue on single-node clusters
# The issue is kubevirt creates a PodDisruptionBudget that prevents
# sriov from successfully draining the node. The workaround is to
# temporarily scale down the kubevirt operator while the drain occurs.
KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=0
#Install addon resources
printf "Installing KUD addon resources\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_apply 04-addon-resources-app.yaml
popd
wait_for_addons_ready
#Workaround for sriov+kubevirt issue on single-node clusters
# Scale the kubevirt operator back up and wait for things to be ready
# again.
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
wait_for_addons_ready
#Test addons
printf "Testing KUD addons\n"
pushd /opt/kud/multi-cluster/addons/tests
failed_kud_tests=""
container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
    #With containerd 1.2.13, the qat test container image fails to unpack.
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
else
    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
fi
for test in ${kud_tests}; do
    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
done
KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
if [[ ! -z "$failed_kud_tests" ]]; then
    printf "Test cases failed:${failed_kud_tests}\n"
    exit 1
fi
popd
printf "All test cases passed\n"
#Tear down setup
printf "\n\nBeginning E2E Test Teardown\n\n"
pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
emcoctl_delete 04-addon-resources-app.yaml
emcoctl_delete 03-addons-app.yaml
emcoctl_delete 02-project.yaml
emcoctl_delete 01-cluster.yaml
emcoctl_delete 00-controllers.yaml
popd
kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
kubectl delete job kud-${CLUSTER_NAME}
kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
rm e2etest/e2e_test_provisioning_cr.yaml
rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
rm -rf /opt/kud/multi-cluster/addons
rm /opt/icn/dhcp/dhcpd.leases