X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;f=deploy%2Faddons%2Faddons.sh;h=9f27982e2988d0efa9a9bd3b1777b10ea11e2350;hb=790dc20efb05a3113b97800926a80753e9a89e98;hp=c3fefe9af6594f1d4ab2ce28a7b7c2f0b6b791d2;hpb=47623705343c39416af4b272ff01b36ae097ddb7;p=icn.git

diff --git a/deploy/addons/addons.sh b/deploy/addons/addons.sh
index c3fefe9..9f27982 100755
--- a/deploy/addons/addons.sh
+++ b/deploy/addons/addons.sh
@@ -10,8 +10,19 @@ source $LIBDIR/common.sh
 BUILDDIR=${SCRIPTDIR/deploy/build}
 mkdir -p ${BUILDDIR}
 
+function install_deps {
+    apt-get install -y jq
+}
+
+function is_emco_ready {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    kubectl --kubeconfig=${cluster_kubeconfig} -n emco wait pod --all --for=condition=Ready --timeout=0s >/dev/null 2>&1
+}
+
 function register_emco_controllers {
-    local -r cluster_name=${CLUSTER_NAME:-e2etest}
+    wait_for is_emco_ready
+    local -r cluster_name=${CLUSTER_NAME:-icn}
     local -r host=$(kubectl -n metal3 get cluster/${cluster_name} -o jsonpath='{.spec.controlPlaneEndpoint.host}')
     cat <<EOF >${BUILDDIR}/${cluster_name}-config.yaml
 orchestrator:
@@ -66,27 +77,51 @@ EOF
 }
 
 function unregister_emco_controllers {
-    local -r cluster_name=${CLUSTER_NAME:-e2etest}
+    local -r cluster_name=${CLUSTER_NAME:-icn}
     emcoctl --config ${BUILDDIR}/${cluster_name}-config.yaml delete -f ${BUILDDIR}/${cluster_name}-controllers.yaml
 }
 
+function is_addon_ready {
+    local -r addon=$1
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    [[ $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get HelmRelease/${addon} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
+}
+
 function test_addons {
+    install_deps
+
     # Create a temporary kubeconfig file for the tests
-    local -r cluster_name=${CLUSTER_NAME:-e2etest}
+    local -r cluster_name=${CLUSTER_NAME:-icn}
     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
     clusterctl -n metal3 get kubeconfig ${cluster_name} >${cluster_kubeconfig}
 
     clone_kud_repository
+    # The vFW test in EMCO v21.12 does not use KubeVirt, so patch the
+    # KuD test and continue to use it
+    pushd ${KUDPATH}
+    patch -p1 --forward <${SCRIPTDIR}/plugin_fw_v2.patch || true
+    popd
+
     pushd ${KUDPATH}/kud/tests
     failed_kud_tests=""
    container_runtime=$(KUBECONFIG=${cluster_kubeconfig} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
+    # TODO Temporarily remove kubevirt from kud_tests below. The
+    # kubevirt self-test needs AllowTcpForwarding yes in
+    # /etc/ssh/sshd_config which is currently disabled by the OS
+    # security hardening.
     if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
         # With containerd 1.2.13, the qat test container image fails to unpack.
-        kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
+        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:ovn4nfv-network nfd:node-feature-discovery sriov-network:sriov-network cmk:cpu-manager"
     else
-        kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
+        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:ovn4nfv-network nfd:node-feature-discovery sriov-network:sriov-network qat:qat-device-plugin cmk:cpu-manager"
     fi
-    for test in ${kud_tests}; do
+    for kud_test in ${kud_tests}; do
+        addon="${kud_test#*:}"
+        test="${kud_test%:*}"
+        if [[ ! -z ${addon} ]]; then
+            wait_for is_addon_ready ${addon}
+        fi
         KUBECONFIG=${cluster_kubeconfig} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
     done
     # The plugin_fw_v2 test needs the EMCO controllers in place
@@ -109,7 +144,7 @@ case $1 in
 Usage: $(basename $0) COMMAND
 
 The "test" command looks for the CLUSTER_NAME variable in the
-environment (default: "e2etest"). This should be the name of the
+environment (default: "icn"). This should be the name of the
 Cluster resource to execute the tests in.
 
 Commands:
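
Note on the change above (not part of the diff itself): the new kud_tests entries are "test:addon" pairs that test_addons splits with bash parameter expansion before running each KuD test. Below is a minimal sketch of that splitting, using a shortened pair list copied from the diff and echo standing in for the real wait_for/is_addon_ready helpers and the ${test}.sh invocation:

    #!/usr/bin/env bash
    # "test:addon" pairs as introduced in the diff (shortened list for illustration).
    kud_tests="topology-manager-sriov:sriov-network multus:multus-cni"
    for kud_test in ${kud_tests}; do
        addon="${kud_test#*:}"   # text after the colon, e.g. "sriov-network"
        test="${kud_test%:*}"    # text before the colon, e.g. "topology-manager-sriov"
        echo "wait for HelmRelease/${addon} to be Ready, then run ${test}.sh"
    done

Based on the Usage text in the script, the test command would be invoked roughly as "CLUSTER_NAME=icn ./deploy/addons/addons.sh test"; with this change the CLUSTER_NAME variable can be omitted, since "icn" is now the default.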