Add kubevirt to e2e tested addons
[icn.git] / cmd / bpa-operator / e2etest / bpa_vm_verifier.sh
index b1f490e..22ebea8 100755 (executable)
@@ -2,17 +2,8 @@
 set -eu -o pipefail
 
 CLUSTER_NAME=cluster-test
-
-# Get MAC and IP addresses of VMs provisioned by metal3
-master0=$(virsh net-dhcp-leases baremetal |grep master-0)
-masterMAC=$(echo $master0 | cut -d " " -f 3)
-masterIP=$(echo $master0 | cut -d " " -f 5)
-masterIP="${masterIP%%/*}"
-
-worker0=$(virsh net-dhcp-leases baremetal |grep worker-0)
-workerMAC=$(echo $worker0 | cut -d " " -f 3)
-workerIP=$(echo $worker0 | cut -d " " -f 5)
-workerIP="${workerIP%%/*}"
+NUM_MASTERS=${NUM_MASTERS:-"1"}
+NUM_WORKERS=${NUM_WORKERS:-"1"}
 
 # Create Fake DHCP File
 mkdir -p /opt/icn/dhcp
@@ -23,27 +14,43 @@ cat <<EOF > /opt/icn/dhcp/dhcpd.leases
 # authoring-byte-order entry is generated, DO NOT DELETE
 authoring-byte-order little-endian;
 
-lease ${masterIP} {
+EOF
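+# The loops below parse the matching "virsh net-dhcp-leases" line: the
+# unquoted echo collapses whitespace so cut can take field 3 (the MAC) and
+# field 5 (the IP with its prefix length); ${ip%%/*} then strips the prefix.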
+for ((master=0;master<NUM_MASTERS;++master)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
+    mac=$(echo $lease | cut -d " " -f 3)
+    ip=$(echo $lease | cut -d " " -f 5)
+    ip="${ip%%/*}"
+    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
+lease ${ip} {
   starts 4 2019/08/08 22:32:49;
   ends 4 2019/08/08 23:52:49;
   cltt 4 2019/08/08 22:32:49;
   binding state active;
   next binding state free;
   rewind binding state free;
-  hardware ethernet ${masterMAC};
-  client-hostname "master-0";
+  hardware ethernet ${mac};
+  client-hostname "master-${master}";
 }
-lease ${workerIP} {
+EOF
+done
+for ((worker=0;worker<NUM_WORKERS;++worker)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
+    mac=$(echo $lease | cut -d " " -f 3)
+    ip=$(echo $lease | cut -d " " -f 5)
+    ip="${ip%%/*}"
+    cat <<EOF >> /opt/icn/dhcp/dhcpd.leases
+lease ${ip} {
   starts 4 2019/08/08 22:32:49;
   ends 4 2019/08/08 23:52:49;
   cltt 4 2019/08/08 22:32:49;
   binding state active;
   next binding state free;
   rewind binding state free;
-  hardware ethernet ${workerMAC};
-  client-hostname "worker-0";
+  hardware ethernet ${mac};
+  client-hostname "worker-${worker}";
 }
 EOF
+done
 
 # Create provisioning CR file for testing
 cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
@@ -56,11 +63,27 @@ metadata:
     owner: c1
 spec:
   masters:
-    - master-0:
-        mac-address: ${masterMAC}
+EOF
+for ((master=0;master<NUM_MASTERS;++master)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "master-${master}")
+    mac=$(echo $lease | cut -d " " -f 3)
+    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
+    - master-${master}:
+        mac-address: ${mac}
+EOF
+done
+cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
   workers:
-    - worker-0:
-        mac-address: ${workerMAC}
+EOF
+for ((worker=0;worker<NUM_WORKERS;++worker)); do
+    lease=$(virsh net-dhcp-leases baremetal |grep "worker-${worker}")
+    mac=$(echo $lease | cut -d " " -f 3)
+    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
+    - worker-${worker}:
+        mac-address: ${mac}
+EOF
+done
+cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
   KUDPlugins:
     - emco
 EOF
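+# With the defaults NUM_MASTERS=1 NUM_WORKERS=1, the spec section of the
+# generated CR comes out as (MAC placeholders stand in for whatever virsh
+# reports):
+#
+#   masters:
+#     - master-0:
+#         mac-address: <master-0 MAC>
+#   workers:
+#     - worker-0:
+#         mac-address: <worker-0 MAC>
+#   KUDPlugins:
+#     - emco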
@@ -72,45 +95,151 @@ status="Running"
 
 while [[ $status == "Running" ]]
 do
-       echo "KUD install job still running"
-       sleep 2m
-       stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-       status=$(echo $stats | cut -d " " -f 3)
+    echo "KUD install job still running"
+    sleep 2m
+    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
+    status=$(echo $stats | cut -d " " -f 3)
 done
 
+#Print logs of Job Pod
+jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow Printing Job pod logs\n"
+kubectl logs $podName
+
 if [[ $status == "Completed" ]];
 then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"
 
    source ../../env/lib/common.sh
-   KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-   APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-   TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
+   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
-     printf "\nKubernetes Cluster Install did not complete successfully\n"
+       printf "\nKubernetes Cluster Install did not complete successfully\n"
+       exit 1
    else
-     printf "\nKubernetes Cluster Install was successful\n"
+       printf "\nKubernetes Cluster Install was successful\n"
    fi
 
 else
-   printf "KUD Install Job failed\n"
+    printf "KUD Install Job failed\n"
+    exit 1
 fi
 
+function emcoctl_apply {
+    # Work around a known issue with emcoctl resource instantiation by
+    # retrying until a 2xx response code is received.
+    try=0
+    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml  |
+                   awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]; do
+        if [[ $try -lt 10 ]]; then
+            echo "Waiting for KUD addons to terminate"
+            sleep 1s
+        else
+            return 1
+        fi
+        try=$((try + 1))
+    done
+    return 0
+}
 
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
+function emcoctl_delete {
+    # Work around a known issue with emcoctl resource deletion by retrying
+    # until a 404 response code is received.
+    until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
+                   awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+        echo "Waiting for KUD addons to terminate"
+        sleep 1s
+    done
+}
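+# Both helpers scrape the "Response Code: NNN" lines printed by emcoctl; the
+# awk expression keeps only the last matching code, so success or failure is
+# effectively decided by the final resource in each file.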
+
+function wait_for_addons_ready {
+    #Wait for addons to be ready
+    # The deployment intent group status reports instantiated before all
+    # Pods are ready, so wait for the instance label (.spec.version) of
+    # the deployment intent group instead.
+    status="Pending"
+    for try in {0..9}; do
+       printf "Waiting for KUD addons to be ready\n"
+       sleep 30s
+       if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+            status="Ready"
+            break
+       fi
+    done
+    [[ $status == "Ready" ]]
+}
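+# wait_for_addons_ready polls for roughly 5 minutes (10 tries x 30s); the
+# trailing [[ $status == "Ready" ]] sets the function's exit status, so a
+# failed wait aborts the script under "set -e".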
+
+#Install addons
+printf "Installing KUD addons\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+emcoctl_apply 00-controllers.yaml
+emcoctl_apply 01-cluster.yaml
+emcoctl_apply 02-project.yaml
+emcoctl_apply 03-addons-app.yaml
+popd
+wait_for_addons_ready
+
+#Workaround for sriov+kubevirt issue on single-node clusters
+# The issue is that kubevirt creates a PodDisruptionBudget that prevents
+# sriov from successfully draining the node. The workaround is to
+# temporarily scale down the kubevirt operator while the drain occurs.
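+# (If needed, the budget involved can be inspected with e.g.
+#  KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get pdb --all-namespaces;
+#  the exact PDB name depends on the kubevirt release.)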
+KUBEVIRT_OP_REPLICAS=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get deployments/r1-kubevirt-operator -o jsonpath='{.spec.replicas}')
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=0
+
+#Install addon resources
+printf "Installing KUD addon resources\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+emcoctl_apply 04-addon-resources-app.yaml
+popd
+wait_for_addons_ready
+
+#Workaround for sriov+kubevirt issue on single-node clusters
+# Scale the kubevirt operator back up and wait for things to be
+# ready again.
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait nodes --for=condition=Ready --all
+KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl scale deployments/r1-kubevirt-operator --replicas=${KUBEVIRT_OP_REPLICAS}
+wait_for_addons_ready
+
+#Test addons
+printf "Testing KUD addons\n"
+pushd /opt/kud/multi-cluster/addons/tests
+failed_kud_tests=""
+container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
+if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
+    #With containerd 1.2.13, the qat test container image fails to unpack.
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
+else
+    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
+fi
+for test in ${kud_tests}; do
+    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
+done
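+# Note: each test runs as part of an "||" list, so "set -e" does not abort on
+# the first failure; failing tests accumulate in failed_kud_tests and are
+# reported together below.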
+KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
+if [[ ! -z "$failed_kud_tests" ]]; then
+    printf "Test cases failed:${failed_kud_tests}\n"
+    exit 1
+fi
+popd
+printf "All test cases passed\n"
 
-#Teardown Setup
+#Tear down setup
 printf "\n\nBeginning E2E Test Teardown\n\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+emcoctl_delete 04-addon-resources-app.yaml
+emcoctl_delete 03-addons-app.yaml
+emcoctl_delete 02-project.yaml
+emcoctl_delete 01-cluster.yaml
+emcoctl_delete 00-controllers.yaml
+popd
 kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
 kubectl delete job kud-${CLUSTER_NAME}
-kubectl delete configmap ${CLUSTER_NAME}-configmap
+kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
 rm e2etest/e2e_test_provisioning_cr.yaml
 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+rm -rf /opt/kud/multi-cluster/addons
 rm /opt/icn/dhcp/dhcpd.leases
 make delete