Extract BareMetalHost creation from metal3 scripts
author     Todd Malsbary <todd.malsbary@intel.com>
           Wed, 27 Oct 2021 18:36:49 +0000 (11:36 -0700)
committer  Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
           Tue, 16 Nov 2021 17:31:42 +0000 (17:31 +0000)
This is in preparation for Cluster API support.

Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
Change-Id: I30536d754a281ec9c5b3cbe4da15cb74b70f5434
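
With this change the previous all-in-one "make install" flow is split in two: "make jump_server" installs the jump server pieces (package prerequisites, KUD, the metal3 launch step, and the BPA operator and REST API), while "make cluster" provisions the BareMetalHosts and then drives cluster provisioning through the BPA operator. A rough sketch of the resulting workflow on the jump host, mirroring the updated Vagrantfile message below:

    root@jump:/icn# make jump_server
    root@jump:/icn# make cluster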

Makefile
Vagrantfile
cmd/bpa-operator/Makefile
cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh

diff --git a/Makefile b/Makefile
index 84ff14b..e32a5ac 100644
--- a/Makefile
+++ b/Makefile
@@ -11,25 +11,24 @@ BOOTLOADER_ENV:=$(CURDIR)/env/ubuntu/bootloader-env
 help:
        @echo "  Targets:"
        @echo "  test             -- run unit tests"
-       @echo "  installer        -- run icn installer"
+       @echo "  jump_server      -- install jump server into this machine"
+       @echo "  cluster          -- provision cluster(s)"
        @echo "  verifier         -- run verifier tests for CI & CD logs"
        @echo "  unit             -- run the unit tests"
        @echo "  help             -- this help output"
 
-install: package_prerequisite \
+install: jump_server \
+       bmh_provision
+
+jump_server: package_prerequisite \
        kud_bm_deploy_mini \
-       bmh_all \
+       bmh_install \
        bpa_op_install \
        bpa_rest_api_install
 
 package_prerequisite:
         pushd $(BMDIR) && ./01_install_package.sh && popd
 
-bmh_preinstall:
-       source user_config.sh && env && \
-       pushd $(BMDIR) && ./02_configure.sh && \
-       ./03_launch_prereq.sh && popd
-
 bmh_clean:
        pushd $(METAL3DIR) && ./01_metal3.sh deprovision && \
        ./03_verify_deprovisioning.sh && ./01_metal3.sh clean && \
@@ -46,18 +45,33 @@ clean_bm_packages:
        pushd $(BOOTLOADER_ENV) && \
         ./02_clean_bootloader_package_req.sh --bm-cleanall && popd
 
-bmh_install:
+bmh_preinstall:
        source user_config.sh && env && \
-       pushd $(METAL3DIR) && ./01_metal3.sh launch && \
-        ./01_metal3.sh provision && ./02_verify.sh && popd
+       pushd $(BMDIR) && ./02_configure.sh && \
+       ./03_launch_prereq.sh && popd
+
+bmh_install: bmh_preinstall
+       source user_config.sh && env && \
+       pushd $(METAL3DIR) && ./01_metal3.sh launch && popd
 
-bmh_all: bmh_preinstall bmh_install
+bmh_provision:
+       source user_config.sh && env && \
+       pushd $(METAL3DIR) && ./01_metal3.sh provision && \
+       ./02_verify.sh && popd
+
+bmh_all: bmh_install bmh_provision
 
 clean_all: bmh_clean \
        bmh_clean_host \
        kud_bm_reset \
        clean_packages
 
+cluster_provision:
+       pushd $(BPA_OPERATOR) && make provision && popd
+
+cluster: bmh_provision \
+       cluster_provision
+
 kud_bm_deploy_mini:
        pushd $(KUD_PATH) && ./kud_bm_launch.sh minimal v1 && popd
 
diff --git a/Vagrantfile b/Vagrantfile
index aa6d2d1..dd97ad3 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -21,7 +21,8 @@ To get started with ICN:
   $ vagrant ssh jump
   vagrant@jump:~$ sudo su
   root@jump:/home/vagrant# cd /icn
-  root@jump:/home/vagrant# make install
+  root@jump:/icn# make jump_server
+  root@jump:/icn# make cluster
 
 ------------------------------------------------------
 MSG
diff --git a/cmd/bpa-operator/Makefile b/cmd/bpa-operator/Makefile
index 528f58c..d9c1321 100644
--- a/cmd/bpa-operator/Makefile
+++ b/cmd/bpa-operator/Makefile
@@ -59,8 +59,13 @@ unit_test:
 
 .PHONY: e2etest_bmh
 e2etest_bmh:
-       ./e2etest/bpa_bmh_verifier.sh
+       ./e2etest/bpa_bmh_verifier.sh provision
+       ./e2etest/bpa_bmh_verifier.sh teardown
 
 .PHONY: e2etest_virtletvm
 e2etest_virtletvm:
        cd e2etest && ./bpa_virtletvm_verifier.sh
+
+.PHONY: provision
+provision:
+       ./e2etest/bpa_bmh_verifier.sh provision
diff --git a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh b/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
index 21fb34a..8b18d33 100755
--- a/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_bmh_verifier.sh
@@ -4,48 +4,6 @@ set -eu -o pipefail
 CLUSTER_NAME=test-bmh-cluster
 ADDONS_NAMESPACE=kud
 
-kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
-sleep 5
-
-#Check Status of kud job pod
-status="Running"
-
-while [[ $status == "Running" ]]
-do
-    echo "KUD install job still running"
-    sleep 2m
-    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-    status=$(echo $stats | cut -d " " -f 3)
-done
-
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
-
-if [[ $status == "Completed" ]];
-then
-   printf "KUD Install Job completed\n"
-   printf "Checking cluster status\n"
-
-   source ../../env/lib/common.sh
-   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
-   if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
-   then
-       printf "\nKubernetes Cluster Install did not complete successfully\n"
-       exit 1
-   else
-       printf "\nKubernetes Cluster Install was successful\n"
-   fi
-
-else
-    printf "KUD Install Job failed\n"
-    exit 1
-fi
-
 function wait_for {
     local -r interval=30
     for ((try=0;try<600;try+=${interval})); do
@@ -134,62 +92,107 @@ function kubevirt_terminated {
     [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi --no-headers 2>/dev/null | wc -l) == 0 ]]
 }
 
-#Apply addons
-printf "Applying KUD addons\n"
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl apply 00-controllers.yaml
-emcoctl apply 01-cluster.yaml
-emcoctl apply 02-project.yaml
-emcoctl apply 03-addons-app.yaml
-popd
-
-#Instantiate addons
-emcoctl instantiate addons
-wait_for addons_instantiated
-emcoctl instantiate networks
-wait_for networks_instantiated
-emcoctl instantiate kubevirt
-wait_for kubevirt_instantiated
-
-#Test addons
-printf "Testing KUD addons\n"
-pushd /opt/kud/multi-cluster/addons/tests
-failed_kud_tests=""
-container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
-if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
-    #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
-else
-    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
-fi
-for test in ${kud_tests}; do
-    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
-done
-KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
-if [[ ! -z "$failed_kud_tests" ]]; then
-    printf "Test cases failed:${failed_kud_tests}\n"
-    exit 1
+if [[ $1 == "provision" ]]; then
+    kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
+    sleep 5
+
+    #Check Status of kud job pod
+    status="Running"
+
+    while [[ $status == "Running" ]]
+    do
+       echo "KUD install job still running"
+       sleep 2m
+       stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
+       status=$(echo $stats | cut -d " " -f 3)
+    done
+
+    #Print logs of Job Pod
+    jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
+    podName=$(echo $jobPod | cut -d " " -f 1)
+    printf "\nNow Printing Job pod logs\n"
+    kubectl logs $podName
+
+    if [[ $status == "Completed" ]];
+    then
+       printf "KUD Install Job completed\n"
+       printf "Checking cluster status\n"
+
+       source ../../env/lib/common.sh
+       CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+       APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+       TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
+       if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
+       then
+           printf "\nKubernetes Cluster Install did not complete successfully\n"
+           exit 1
+       else
+           printf "\nKubernetes Cluster Install was successful\n"
+       fi
+
+    else
+       printf "KUD Install Job failed\n"
+       exit 1
+    fi
+
+    #Apply addons
+    printf "Applying KUD addons\n"
+    pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+    emcoctl apply 00-controllers.yaml
+    emcoctl apply 01-cluster.yaml
+    emcoctl apply 02-project.yaml
+    emcoctl apply 03-addons-app.yaml
+    popd
+
+    #Instantiate addons
+    emcoctl instantiate addons
+    wait_for addons_instantiated
+    emcoctl instantiate networks
+    wait_for networks_instantiated
+    emcoctl instantiate kubevirt
+    wait_for kubevirt_instantiated
+
+    #Test addons
+    printf "Testing KUD addons\n"
+    pushd /opt/kud/multi-cluster/addons/tests
+    failed_kud_tests=""
+    container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
+    if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
+       #With containerd 1.2.13, the qat test container image fails to unpack.
+       kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
+    else
+       kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
+    fi
+    for test in ${kud_tests}; do
+       KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
+    done
+    KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
+    if [[ ! -z "$failed_kud_tests" ]]; then
+       printf "Test cases failed:${failed_kud_tests}\n"
+       exit 1
+    fi
+    popd
+    printf "All test cases passed\n"
+elif [[ $1 == "teardown" ]]; then
+    CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+    #Tear down setup
+    printf "\n\nBeginning BMH E2E Test Teardown\n\n"
+    emcoctl terminate kubevirt
+    wait_for kubevirt_terminated
+    emcoctl terminate networks
+    wait_for networks_terminated
+    emcoctl terminate addons
+    wait_for addons_terminated
+    pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+    emcoctl delete 03-addons-app.yaml
+    emcoctl delete 02-project.yaml
+    emcoctl delete 01-cluster.yaml
+    emcoctl delete 00-controllers.yaml
+    popd
+    kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
+    kubectl delete job kud-${CLUSTER_NAME}
+    kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
+    rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+    rm -rf /opt/kud/multi-cluster/addons
+    make delete
 fi
-popd
-printf "All test cases passed\n"
-
-#Tear down setup
-printf "\n\nBeginning BMH E2E Test Teardown\n\n"
-emcoctl terminate kubevirt
-wait_for kubevirt_terminated
-emcoctl terminate networks
-wait_for networks_terminated
-emcoctl terminate addons
-wait_for addons_terminated
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl delete 03-addons-app.yaml
-emcoctl delete 02-project.yaml
-emcoctl delete 01-cluster.yaml
-emcoctl delete 00-controllers.yaml
-popd
-kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
-kubectl delete job kud-${CLUSTER_NAME}
-kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
-rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
-rm -rf /opt/kud/multi-cluster/addons
-make delete