help:
@echo " Targets:"
@echo " test -- run unit tests"
- @echo " installer -- run icn installer"
+ @echo " jump_server -- install jump server on this machine"
+ @echo " cluster -- provision cluster(s)"
@echo " verifier -- run verifier tests for CI & CD logs"
@echo " unit -- run the unit tests"
@echo " help -- this help output"
-install: package_prerequisite \
+install: jump_server \
+ bmh_provision
+
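+# jump_server prepares this machine: package prerequisites, a minimal KUD
+# deployment, the Metal3 provisioning services, and the BPA operator and REST API.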
+jump_server: package_prerequisite \
kud_bm_deploy_mini \
- bmh_all \
+ bmh_install \
bpa_op_install \
bpa_rest_api_install
package_prerequisite:
pushd $(BMDIR) && ./01_install_package.sh && popd
-bmh_preinstall:
- source user_config.sh && env && \
- pushd $(BMDIR) && ./02_configure.sh && \
- ./03_launch_prereq.sh && popd
-
bmh_clean:
pushd $(METAL3DIR) && ./01_metal3.sh deprovision && \
./03_verify_deprovisioning.sh && ./01_metal3.sh clean && \
pushd $(BOOTLOADER_ENV) && \
./02_clean_bootloader_package_req.sh --bm-cleanall && popd
-bmh_install:
+bmh_preinstall:
source user_config.sh && env && \
- pushd $(METAL3DIR) && ./01_metal3.sh launch && \
- ./01_metal3.sh provision && ./02_verify.sh && popd
+ pushd $(BMDIR) && ./02_configure.sh && \
+ ./03_launch_prereq.sh && popd
+
+bmh_install: bmh_preinstall
+ source user_config.sh && env && \
+ pushd $(METAL3DIR) && ./01_metal3.sh launch && popd
-bmh_all: bmh_preinstall bmh_install
+bmh_provision:
+ source user_config.sh && env && \
+ pushd $(METAL3DIR) && ./01_metal3.sh provision && \
+ ./02_verify.sh && popd
+
+bmh_all: bmh_install bmh_provision
clean_all: bmh_clean \
bmh_clean_host \
kud_bm_reset \
clean_packages
+cluster_provision:
+ pushd $(BPA_OPERATOR) && make provision && popd
+
+cluster: bmh_provision \
+ cluster_provision
+
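+# Typical flow (a sketch, not enforced by these targets): run "make jump_server"
+# once to prepare this machine, then "make cluster" to provision the bare-metal
+# hosts and bring up the cluster; "make install" chains jump_server and bmh_provision.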
kud_bm_deploy_mini:
pushd $(KUD_PATH) && ./kud_bm_launch.sh minimal v1 && popd
CLUSTER_NAME=test-bmh-cluster
ADDONS_NAMESPACE=kud
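#Assumption: CLUSTER_NAME matches the cluster name used in
#e2etest/test_bmh_provisioning_cr.yaml, since the provisioning job and configmap
#are looked up below as kud-${CLUSTER_NAME} and ${CLUSTER_NAME}-configmap.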
-kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
-sleep 5
-
-#Check Status of kud job pod
-status="Running"
-
-while [[ $status == "Running" ]]
-do
- echo "KUD install job still running"
- sleep 2m
- stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
- status=$(echo $stats | cut -d " " -f 3)
-done
-
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
-
-if [[ $status == "Completed" ]];
-then
- printf "KUD Install Job completed\n"
- printf "Checking cluster status\n"
-
- source ../../env/lib/common.sh
- CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
- APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
- TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
- if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
- then
- printf "\nKubernetes Cluster Install did not complete successfully\n"
- exit 1
- else
- printf "\nKubernetes Cluster Install was successful\n"
- fi
-
-else
- printf "KUD Install Job failed\n"
- exit 1
-fi
-
function wait_for {
    local -r interval=30
    for ((try=0;try<600;try+=${interval})); do
        if "$@"; then return 0; fi
        sleep ${interval}
    done
    return 1
}
function kubevirt_terminated { [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi --no-headers 2>/dev/null | wc -l) == 0 ]]; }
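#wait_for retries its predicate command every 30s, for up to ~10 minutes.
#kubevirt_terminated is an assumed predicate name, inferred from the
#"wait_for kubevirt_terminated" call in the teardown path; the other predicates
#(addons_instantiated, networks_instantiated, etc.) are elided from this diff.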
-#Apply addons
-printf "Applying KUD addons\n"
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl apply 00-controllers.yaml
-emcoctl apply 01-cluster.yaml
-emcoctl apply 02-project.yaml
-emcoctl apply 03-addons-app.yaml
-popd
-
-#Instantiate addons
-emcoctl instantiate addons
-wait_for addons_instantiated
-emcoctl instantiate networks
-wait_for networks_instantiated
-emcoctl instantiate kubevirt
-wait_for kubevirt_instantiated
-
-#Test addons
-printf "Testing KUD addons\n"
-pushd /opt/kud/multi-cluster/addons/tests
-failed_kud_tests=""
-container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
-if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
- #With containerd 1.2.13, the qat test container image fails to unpack.
- kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
-else
- kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
-fi
-for test in ${kud_tests}; do
- KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
-done
-KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
-if [[ ! -z "$failed_kud_tests" ]]; then
- printf "Test cases failed:${failed_kud_tests}\n"
- exit 1
+if [[ $1 == "provision" ]]; then
+ kubectl create -f e2etest/test_bmh_provisioning_cr.yaml
+ sleep 5
+
+ #Check Status of kud job pod
+ status="Running"
+
+ while [[ $status == "Running" ]]
+ do
+ echo "KUD install job still running"
+ sleep 2m
+ status=$(kubectl get pods | grep -i kud-${CLUSTER_NAME} | awk '{print $3}')
+ done
+
+ #Print logs of Job Pod
+ podName=$(kubectl get pods | grep kud-${CLUSTER_NAME} | awk '{print $1}')
+ printf "\nPrinting job pod logs\n"
+ kubectl logs $podName
+
+ if [[ $status == "Completed" ]]; then
+ printf "KUD Install Job completed\n"
+ printf "Checking cluster status\n"
+
+ source ../../env/lib/common.sh
+ CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+ APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+ TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
+ if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
+ then
+ printf "\nKubernetes Cluster Install did not complete successfully\n"
+ exit 1
+ else
+ printf "\nKubernetes Cluster Install was successful\n"
+ fi
+
+ else
+ printf "KUD Install Job failed\n"
+ exit 1
+ fi
+
+ #Apply addons
+ printf "Applying KUD addons\n"
+ pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+ emcoctl apply 00-controllers.yaml
+ emcoctl apply 01-cluster.yaml
+ emcoctl apply 02-project.yaml
+ emcoctl apply 03-addons-app.yaml
+ popd
+
+ #Instantiate addons
+ emcoctl instantiate addons
+ wait_for addons_instantiated
+ emcoctl instantiate networks
+ wait_for networks_instantiated
+ emcoctl instantiate kubevirt
+ wait_for kubevirt_instantiated
+
+ #Test addons
+ printf "Testing KUD addons\n"
+ pushd /opt/kud/multi-cluster/addons/tests
+ failed_kud_tests=""
+ container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
+ if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
+ #With containerd 1.2.13, the qat test container image fails to unpack.
+ kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
+ else
+ kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
+ fi
+ for test in ${kud_tests}; do
+ KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
+ done
+ KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
+ if [[ -n "$failed_kud_tests" ]]; then
+ printf "Test cases failed:%s\n" "${failed_kud_tests}"
+ exit 1
+ fi
+ popd
+ printf "All test cases passed\n"
+elif [[ $1 == "teardown" ]]; then
+ CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+ #Tear down setup
+ printf "\n\nBeginning BMH E2E Test Teardown\n\n"
+ emcoctl terminate kubevirt
+ wait_for kubevirt_terminated
+ emcoctl terminate networks
+ wait_for networks_terminated
+ emcoctl terminate addons
+ wait_for addons_terminated
+ pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
+ emcoctl delete 03-addons-app.yaml
+ emcoctl delete 02-project.yaml
+ emcoctl delete 01-cluster.yaml
+ emcoctl delete 00-controllers.yaml
+ popd
+ kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
+ kubectl delete job kud-${CLUSTER_NAME}
+ kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
+ rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
+ rm -rf /opt/kud/multi-cluster/addons
+ make delete
fi
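+
+#Usage sketch (the script's filename is not shown in this diff):
+#  <script>.sh provision  -- provision the cluster, then apply and test KUD addons
+#  <script>.sh teardown   -- terminate addons and clean up cluster artifacts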
-popd
-printf "All test cases passed\n"
-
-#Tear down setup
-printf "\n\nBeginning BMH E2E Test Teardown\n\n"
-emcoctl terminate kubevirt
-wait_for kubevirt_terminated
-emcoctl terminate networks
-wait_for networks_terminated
-emcoctl terminate addons
-wait_for addons_terminated
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl delete 03-addons-app.yaml
-emcoctl delete 02-project.yaml
-emcoctl delete 01-cluster.yaml
-emcoctl delete 00-controllers.yaml
-popd
-kubectl delete -f e2etest/test_bmh_provisioning_cr.yaml
-kubectl delete job kud-${CLUSTER_NAME}
-kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
-rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
-rm -rf /opt/kud/multi-cluster/addons
-make delete