X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;f=cmd%2Fbpa-operator%2Fe2etest%2Fbpa_vm_verifier.sh;h=d22ccae07363e68845329e6506e9a7cbfa73b9e6;hb=3fc14a5eaf37abd47c8ae725342e7fa4d6747aa5;hp=b1f490e6a5133f660adc1bccd3e3343500ada10d;hpb=6b4e599bbee2e90b205c2d8a9693289b35574363;p=icn.git

diff --git a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
index b1f490e..d22ccae 100755
--- a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
@@ -2,17 +2,8 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=cluster-test
-
-# Get MAC and IP addresses of VMs provisioned by metal3
-master0=$(virsh net-dhcp-leases baremetal |grep master-0)
-masterMAC=$(echo $master0 | cut -d " " -f 3)
-masterIP=$(echo $master0 | cut -d " " -f 5)
-masterIP="${masterIP%%/*}"
-
-worker0=$(virsh net-dhcp-leases baremetal |grep worker-0)
-workerMAC=$(echo $worker0 | cut -d " " -f 3)
-workerIP=$(echo $worker0 | cut -d " " -f 5)
-workerIP="${workerIP%%/*}"
+NUM_MASTERS=${NUM_MASTERS:-"1"}
+NUM_WORKERS=${NUM_WORKERS:-"1"}
 
 # Create Fake DHCP File
 mkdir -p /opt/icn/dhcp
@@ -23,27 +14,43 @@ cat <<EOF > /opt/icn/dhcp/dhcpd.leases
 # authoring-byte-order entry is generated, DO NOT DELETE
 authoring-byte-order little-endian;
 
-lease ${masterIP} {
+EOF
+for ((master=0;master<NUM_MASTERS;++master)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
+    mac=$(echo ${lease} | cut -d " " -f 3)
+    ip=$(echo ${lease} | cut -d " " -f 5)
+    ip="${ip%%/*}"
+    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
+lease ${ip} {
   starts 4 2019/08/08 22:32:49;
   ends 4 2019/08/08 23:52:49;
   cltt 4 2019/08/08 22:32:49;
   binding state active;
   next binding state free;
   rewind binding state free;
-  hardware ethernet ${masterMAC};
-  client-hostname "master-0";
+  hardware ethernet ${mac};
+  client-hostname "master-${master}";
 }
-lease ${workerIP} {
+EOF
+done
+for ((worker=0;worker<NUM_WORKERS;++worker)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
+    mac=$(echo ${lease} | cut -d " " -f 3)
+    ip=$(echo ${lease} | cut -d " " -f 5)
+    ip="${ip%%/*}"
+    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
+lease ${ip} {
   starts 4 2019/08/08 22:32:49;
   ends 4 2019/08/08 23:52:49;
   cltt 4 2019/08/08 22:32:49;
   binding state active;
   next binding state free;
   rewind binding state free;
-  hardware ethernet ${workerMAC};
-  client-hostname "worker-0";
+  hardware ethernet ${mac};
+  client-hostname "worker-${worker}";
 }
 EOF
+done
 
 # Create provisioning CR file for testing
 cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
@@ -56,11 +63,27 @@ metadata:
     owner: c1
 spec:
   masters:
-    - master-0:
-        mac-address: ${masterMAC}
+EOF
+for ((master=0;master<NUM_MASTERS;++master)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
+    mac=$(echo ${lease} | cut -d " " -f 3)
+    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
+    - master-${master}:
+        mac-address: ${mac}
+EOF
+done
+cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
   workers:
-    - worker-0:
-        mac-address: ${workerMAC}
+EOF
+for ((worker=0;worker<NUM_WORKERS;++worker)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
+    mac=$(echo ${lease} | cut -d " " -f 3)
+    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
+    - worker-${worker}:
+        mac-address: ${mac}
+EOF
+done
+cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
   KUDPlugins:
     - emco
 EOF
@@ -72,45 +95,120 @@ status="Running"
 while [[ $status == "Running" ]]
 do
-	echo "KUD install job still running"
-	sleep 2m
-	stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-	status=$(echo $stats | cut -d " " -f 3)
+    echo "KUD install job still running"
+    sleep 2m
+    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
+    status=$(echo $stats | cut -d " " -f 3)
 done
 
+#Print logs of Job Pod
+jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow Printing Job pod logs\n"
+kubectl logs $podName
+
 if [[ $status == "Completed" ]];
 then
     printf "KUD Install Job completed\n"
     printf "Checking cluster status\n"
     source ../../env/lib/common.sh
-    KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-    APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-    TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
+    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+    APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+    TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
     if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
     then
-        printf "\nKubernetes Cluster Install did not complete successfully\n"
+        printf "\nKubernetes Cluster Install did not complete successfully\n"
+        exit 1
     else
-        printf "\nKubernetes Cluster Install was successful\n"
+        printf "\nKubernetes Cluster Install was successful\n"
     fi
 else
-    printf "KUD Install Job failed\n"
+    printf "KUD Install Job failed\n"
+    exit 1
 fi
 
+#Install addons
+printf "Installing KUD addons\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml
+popd
+
+#Wait for addons to be ready
+# The deployment intent group status reports instantiated before all
+# Pods are ready, so wait for the instance label (.spec.version) of
+# the deployment intent group instead.
+status="Pending"
+for try in {0..9}; do
+    printf "Waiting for KUD addons to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
+[[ $status == "Ready" ]]
+
+#Install addon resources
+printf "Installing KUD addon resources\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml
+popd
 
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
+#Wait for addon resources to be ready
+status="Pending"
+for try in {0..9}; do
+    printf "Waiting for KUD addon resources to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
+[[ $status == "Ready" ]]
+
+#Test addons
+printf "Testing KUD addons\n"
+pushd /opt/kud/multi-cluster/addons/tests
+failed_kud_tests=""
+for addon in multus ovn4nfv nfd sriov-network qat cmk; do
+    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
+done
+if [[ ! -z "$failed_kud_tests" ]]; then
+    printf "Test cases failed:${failed_kud_tests}\n"
+    exit 1
+fi
+popd
+printf "All test cases passed\n"
 
-#Teardown Setup
+#Tear down setup
 printf "\n\nBeginning E2E Test Teardown\n\n"
+# Workaround known issue with emcoctl resource deletion by retrying
+# until a 404 is received.
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml |
+           awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addon resources to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml |
+           awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml |
+           awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+popd
 kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
 kubectl delete job kud-${CLUSTER_NAME}
-kubectl delete configmap ${CLUSTER_NAME}-configmap
+kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
 rm e2etest/e2e_test_provisioning_cr.yaml
 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+rm -rf /opt/kud/multi-cluster/addons
 rm /opt/icn/dhcp/dhcpd.leases
 make delete
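
Two details of the new flow, sketched below for illustration. The diff defaults NUM_MASTERS and NUM_WORKERS to 1 and derives each node's MAC/IP by grepping `virsh net-dhcp-leases baremetal`, so scaling the test is a matter of exporting the knobs before the run; and the teardown's until-loops rely on the awk filter keeping only the last "Response Code:" line that emcoctl.sh prints. The two-node invocation and the fabricated emcoctl transcript here are assumptions for the sketch, not output captured from a real run.

    # Hypothetical two-master/two-worker run; assumes VMs named
    # master-0/master-1 and worker-0/worker-1 hold DHCP leases on the
    # libvirt "baremetal" network, since the loops grep for those names.
    NUM_MASTERS=2 NUM_WORKERS=2 ./bpa_vm_verifier.sh

    # 404 detection used by the teardown loops: remember the third field
    # of every "Response Code:" line and print the last one seen. Fed a
    # fabricated transcript, this prints 404, which ends the until-loop.
    printf 'Response Code: 204\nResponse Code: 404\n' |
        awk '/Response Code:/ {code=$3} END{print code}'

Retrying the delete until the API answers 404 treats "not found" as the only reliable signal that the addons are fully gone, which is the emcoctl deletion issue the workaround comment above calls out.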