Deploy addons via EMCO 18/4218/7
author Todd Malsbary <todd.malsbary@intel.com>
Fri, 9 Apr 2021 00:25:36 +0000 (17:25 -0700)
committer Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
Mon, 24 May 2021 17:04:02 +0000 (17:04 +0000)
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: Ie99b91cea63e081c6400a07d8fe1c7dd177eb32f

cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
cmd/bpa-operator/e2etest/bpa_vm_verifier.sh

index aa0d8dc..9834811 100755 (executable)
@@ -11,43 +11,118 @@ status="Running"
 
 while [[ $status == "Running" ]]
 do
-        echo "KUD install job still running"
-        sleep 2m
-        stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-        status=$(echo $stats | cut -d " " -f 3)
+    echo "KUD install job still running"
+    sleep 2m
+    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
+    status=$(echo $stats | cut -d " " -f 3)
 done
 
+#Print logs of Job Pod
+jobPod=$(kubectl get pods | grep kud-${CLUSTER_NAME})
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow printing Job pod logs\n"
+kubectl logs $podName
+
 if [[ $status == "Completed" ]];
 then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"
 
    source ../../env/lib/common.sh
-   KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-   APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-   TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
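+   # Point kubectl at the new cluster per-command via the KUBECONFIG environment variable.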
+   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
-     printf "\nKubernetes Cluster Install did not complete successfully\n"
+       printf "\nKubernetes Cluster Install did not complete successfully\n"
+       exit 1
    else
-     printf "\nKubernetes Cluster Install was successful\n"
+       printf "\nKubernetes Cluster Install was successful\n"
    fi
 
 else
-   printf "KUD Install Job failed\n"
+    printf "KUD Install Job failed\n"
+    exit 1
 fi
 
+#Install addons
+printf "Installing KUD addons\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml
+popd
 
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
+#Wait for addons to be ready
+# The deployment intent group status reports instantiated before all
+# Pods are ready, so instead wait for the Pods carrying the instance
+# label (the deployment intent group's .spec.version) to become Ready.
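+# "kubectl wait --timeout=0s" is a one-shot check, so the loop below
+# polls up to 20 times at 30s intervals before giving up.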
+status="Pending"
+for try in {0..19}; do
+    printf "Waiting for KUD addons to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
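+# Fail the verifier if the addons never became Ready.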
+[[ $status == "Ready" ]]
+
+#Install addon resources
+printf "Installing KUD addon resources\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml
+popd
+
+#Wait for addon resources to be ready
+status="Pending"
+for try in {0..9}; do
+    printf "Waiting for KUD addon resources to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
+[[ $status == "Ready" ]] || exit 1
+
+#Test addons
+printf "Testing KUD addons\n"
+pushd /opt/kud/multi-cluster/addons/tests
+failed_kud_tests=""
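+# Run every addon test, collecting failures so all tests run before reporting.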
+for addon in multus ovn4nfv nfd sriov-network qat cmk; do
+    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
+done
+if [[ -n "$failed_kud_tests" ]]; then
+    printf "Test cases failed:${failed_kud_tests}\n"
+    exit 1
+fi
+popd
+printf "All test cases passed\n"
 
 #Tear down setup
 printf "\n\nBeginning BMH E2E Test Teardown\n\n"
+# Work around a known issue with emcoctl resource deletion by retrying
+# each delete until the API returns 404 (Not Found), i.e. the resource is gone.
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addon resources to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+popd
 kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
 kubectl delete job kud-${CLUSTER_NAME}
 kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+rm -rf /opt/kud/multi-cluster/addons
 make delete
index 7f4fcae..d22ccae 100755 (executable)
@@ -95,45 +95,120 @@ status="Running"
 
 while [[ $status == "Running" ]]
 do
-       echo "KUD install job still running"
-       sleep 2m
-       stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-       status=$(echo $stats | cut -d " " -f 3)
+    echo "KUD install job still running"
+    sleep 2m
+    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
+    status=$(echo $stats | cut -d " " -f 3)
 done
 
+#Print logs of Job Pod
+jobPod=$(kubectl get pods | grep kud-${CLUSTER_NAME})
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow printing Job pod logs\n"
+kubectl logs $podName
+
 if [[ $status == "Completed" ]];
 then
    printf "KUD Install Job completed\n"
    printf "Checking cluster status\n"
 
    source ../../env/lib/common.sh
-   KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-   APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-   TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
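+   # Point kubectl at the new cluster per-command via the KUBECONFIG environment variable.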
+   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
    if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
    then
-     printf "\nKubernetes Cluster Install did not complete successfully\n"
+       printf "\nKubernetes Cluster Install did not complete successfully\n"
+       exit 1
    else
-     printf "\nKubernetes Cluster Install was successful\n"
+       printf "\nKubernetes Cluster Install was successful\n"
    fi
 
 else
-   printf "KUD Install Job failed\n"
+    printf "KUD Install Job failed\n"
+    exit 1
 fi
 
+#Install addons
+printf "Installing KUD addons\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f prerequisites.yaml -v values.yaml
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values.yaml
+popd
 
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
+#Wait for addons to be ready
+# The deployment intent group status reports instantiated before all
+# Pods are ready, so instead wait for the Pods carrying the instance
+# label (the deployment intent group's .spec.version) to become Ready.
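+# "kubectl wait --timeout=0s" is a one-shot check, so the loop below
+# polls up to 10 times at 30s intervals before giving up.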
+status="Pending"
+for try in {0..9}; do
+    printf "Waiting for KUD addons to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
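+# Fail the verifier if the addons never became Ready.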
+[[ $status == "Ready" ]] || exit 1
+
+#Install addon resources
+printf "Installing KUD addon resources\n"
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f composite-app.yaml -v values-resources.yaml
+popd
+
+#Wait for addon resources to be ready
+status="Pending"
+for try in {0..9}; do
+    printf "Waiting for KUD addon resources to be ready\n"
+    sleep 30s
+    if KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --all-namespaces --timeout=0s 2>/dev/null >/dev/null; then
+        status="Ready"
+        break
+    fi
+done
+[[ $status == "Ready" ]] || exit 1
+
+#Test addons
+printf "Testing KUD addons\n"
+pushd /opt/kud/multi-cluster/addons/tests
+failed_kud_tests=""
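+# Run every addon test, collecting failures so all tests run before reporting.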
+for addon in multus ovn4nfv nfd sriov-network qat cmk; do
+    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${addon}.sh || failed_kud_tests="${failed_kud_tests} ${addon}"
+done
+if [[ -n "$failed_kud_tests" ]]; then
+    printf "Test cases failed:${failed_kud_tests}\n"
+    exit 1
+fi
+popd
+printf "All test cases passed\n"
 
-#Teardown Setup
+#Tear down setup
 printf "\n\nBeginning E2E Test Teardown\n\n"
+# Work around a known issue with emcoctl resource deletion by retrying
+# each delete until the API returns 404 (Not Found), i.e. the resource is gone.
+pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values-resources.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addon resources to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f composite-app.yaml -v values.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+until [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f prerequisites.yaml -v values.yaml |
+            awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]; do
+    echo "Waiting for KUD addons to terminate"
+    sleep 1s
+done
+popd
 kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
 kubectl delete job kud-${CLUSTER_NAME}
 kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
 rm e2etest/e2e_test_provisioning_cr.yaml
 rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+rm -rf /opt/kud/multi-cluster/addons
 rm /opt/icn/dhcp/dhcpd.leases
 make delete