X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;f=deploy%2Faddons%2Faddons.sh;h=fcaafc9dd065d443714cc176a176f283c1c91daf;hb=HEAD;hp=9f27982e2988d0efa9a9bd3b1777b10ea11e2350;hpb=275eecaa965404bb60bc47b469388276076acf8f;p=icn.git

diff --git a/deploy/addons/addons.sh b/deploy/addons/addons.sh
index 9f27982..fcaafc9 100755
--- a/deploy/addons/addons.sh
+++ b/deploy/addons/addons.sh
@@ -38,7 +38,7 @@ metadata:
   name: rsync
 spec:
   host: ${host}
-  port: 30441
+  port: 30431
 ---
 version: emco/v2
 resourceContext:
@@ -47,7 +47,7 @@ metadata:
   name: gac
 spec:
   host: ${host}
-  port: 30493
+  port: 30433
   type: "action"
   priority: 1
 ---
@@ -58,7 +58,7 @@ metadata:
   name: ovnaction
 spec:
   host: ${host}
-  port: 30473
+  port: 30432
   type: "action"
   priority: 1
 ---
@@ -69,7 +69,7 @@ metadata:
   name: dtc
 spec:
   host: ${host}
-  port: 30483
+  port: 30448
   type: "action"
   priority: 1
 EOF
@@ -85,7 +85,53 @@ function is_addon_ready {
     local -r addon=$1
     local -r cluster_name=${CLUSTER_NAME:-icn}
     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
-    [[ $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get HelmRelease/${addon} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
+    if [[ $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get Kustomization/${addon} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
+        return 1
+    fi
+
+    # Additional addon specific checks
+    case ${addon} in
+        "cpu-manager")
+            for node in $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get pods -l app=cmk-reconcile-ds-all -o jsonpath='{range .items[*]}{.spec.nodeName}{"\n"}{end}' | sort | uniq); do
+                kubectl --kubeconfig=${cluster_kubeconfig} get cmk-nodereport ${node}
+            done
+            ;;
+        "node-feature-discovery")
+            node_name=$(kubectl --kubeconfig=${cluster_kubeconfig} get nodes -o jsonpath='{range .items[*]}{.metadata.name} {.spec.taints[?(@.effect=="NoSchedule")].effect}{"\n"}{end}' | awk 'NF==1 {print $0;exit}')
+            kernel_version=$(kubectl --kubeconfig=${cluster_kubeconfig} get node ${node_name} -o jsonpath='{.metadata.labels.feature\.node\.kubernetes\.io/kernel-version\.major}')
+            [[ -n ${kernel_version} ]]
+            ;;
+    esac
+}
+
+function test_openebs {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    kubectl --kubeconfig=${cluster_kubeconfig} apply -f ${SCRIPTDIR}/openebs-cstor.yaml
+    kubectl --kubeconfig=${cluster_kubeconfig} wait pod hello-cstor-csi-disk-pod --for=condition=Ready --timeout=5m
+    kubectl --kubeconfig=${cluster_kubeconfig} exec -it hello-cstor-csi-disk-pod -- cat /mnt/store/greet.txt
+    kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/openebs-cstor.yaml
+}
+
+function is_vm_reachable {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    local -r node_port=$(kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test get service/test-vm-service -o jsonpath='{.spec.ports[].nodePort}')
+    local -r node=$(kubectl -n metal3 get cluster/${cluster_name} -o jsonpath='{.spec.controlPlaneEndpoint.host}')
+    sshpass -p testuser ssh testuser@${node} -p ${node_port} -- uptime
+}
+
+function test_kubevirt {
+    local -r cluster_name=${CLUSTER_NAME:-icn}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    kubectl --kubeconfig=${cluster_kubeconfig} create ns kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test create rolebinding psp:privileged-kubevirt-test --clusterrole=psp:privileged --group=system:serviceaccounts:kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} apply -f ${SCRIPTDIR}/kubevirt-test.yaml
+    WAIT_FOR_TRIES=30
+    wait_for is_vm_reachable
+    kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/kubevirt-test.yaml
+    kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test delete rolebinding psp:privileged-kubevirt-test
+    kubectl --kubeconfig=${cluster_kubeconfig} delete ns kubevirt-test
 }
 
 function test_addons {
@@ -104,17 +150,13 @@ function test_addons {
     popd
 
     pushd ${KUDPATH}/kud/tests
-    failed_kud_tests=""
+    failed_tests=""
     container_runtime=$(KUBECONFIG=${cluster_kubeconfig} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
-    # TODO Temporarily remove kubevirt from kud_tests below. The
-    # kubevirt self-test needs AllowTcpForwarding yes in
-    # /etc/ssh/sshd_config which is currently disabled by the OS
-    # security hardening.
     if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
         # With containerd 1.2.13, the qat test container image fails to unpack.
-        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:ovn4nfv-network nfd:node-feature-discovery sriov-network:sriov-network cmk:cpu-manager"
+        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:nodus-network nfd:node-feature-discovery sriov-network:sriov-network cmk:cpu-manager"
     else
-        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:ovn4nfv-network nfd:node-feature-discovery sriov-network:sriov-network qat:qat-device-plugin cmk:cpu-manager"
+        kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:nodus-network nfd:node-feature-discovery sriov-network:sriov-network qat:qat-plugin cmk:cpu-manager"
     fi
     for kud_test in ${kud_tests}; do
         addon="${kud_test#*:}"
@@ -122,15 +164,19 @@ function test_addons {
         if [[ ! -z ${addon} ]]; then
            wait_for is_addon_ready ${addon}
         fi
-        KUBECONFIG=${cluster_kubeconfig} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
+        KUBECONFIG=${cluster_kubeconfig} bash ${test}.sh || failed_tests="${failed_tests} ${test}"
     done
     # The plugin_fw_v2 test needs the EMCO controllers in place
     register_emco_controllers
-    DEMO_FOLDER=${KUDPATH}/kud/demo KUBECONFIG=${cluster_kubeconfig} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
+    DEMO_FOLDER=${KUDPATH}/kud/demo KUBECONFIG=${cluster_kubeconfig} bash plugin_fw_v2.sh --external || failed_tests="${failed_tests} plugin_fw_v2"
     unregister_emco_controllers
     popd
-    if [[ ! -z "$failed_kud_tests" ]]; then
-        echo "Test cases failed:${failed_kud_tests}"
+
+    test_openebs || failed_tests="${failed_tests} openebs"
+    test_kubevirt || failed_tests="${failed_tests} kubevirt"
+
+    if [[ ! -z "$failed_tests" ]]; then
+        echo "Test cases failed:${failed_tests}"
         exit 1
     fi
     echo "All test cases passed"
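
Reviewer note: a minimal smoke-test sketch, not part of the patch. With this change the EMCO controllers are re-registered on NodePorts 30431 (rsync), 30432 (ovnaction), 30433 (gac), and 30448 (dtc). Assuming ${host} holds the same value used by the controller registration above and that nc is available on the jump host, reachability of the new ports can be checked before running plugin_fw_v2.sh:

    # Hypothetical check, not part of addons.sh: probe each EMCO controller NodePort.
    for port in 30431 30432 30433 30448; do
        nc -z -w 5 "${host}" "${port}" || echo "EMCO controller port ${port} is not reachable on ${host}"
    done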