Add KubeVirt testing using PV
[icn.git] / deploy / addons / addons.sh
1 #!/usr/bin/env bash
2 set -eux -o pipefail
3
4 SCRIPTDIR="$(readlink -f $(dirname ${BASH_SOURCE[0]}))"
5 LIBDIR="$(dirname $(dirname ${SCRIPTDIR}))/env/lib"
6
7 source $LIBDIR/logging.sh
8 source $LIBDIR/common.sh
9
10 BUILDDIR=${SCRIPTDIR/deploy/build}
11 mkdir -p ${BUILDDIR}
12
13 function install_deps {
14     apt-get install -y jq
15 }
16
17 function is_emco_ready {
18     local -r cluster_name=${CLUSTER_NAME:-icn}
19     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
20     kubectl --kubeconfig=${cluster_kubeconfig} -n emco wait pod --all --for=condition=Ready --timeout=0s >/dev/null 2>&1
21 }
22
23 function register_emco_controllers {
24     wait_for is_emco_ready
25     local -r cluster_name=${CLUSTER_NAME:-icn}
26     local -r host=$(kubectl -n metal3 get cluster/${cluster_name} -o jsonpath='{.spec.controlPlaneEndpoint.host}')
27     cat <<EOF >${BUILDDIR}/${cluster_name}-config.yaml
28 orchestrator:
29   host: ${host}
30   port: 30415
31 EOF
32     cat <<EOF >${BUILDDIR}/${cluster_name}-controllers.yaml
33 ---
34 version: emco/v2
35 resourceContext:
36   anchor: controllers
37 metadata:
38   name: rsync
39 spec:
40   host: ${host}
41   port: 30431
42 ---
43 version: emco/v2
44 resourceContext:
45   anchor: controllers
46 metadata:
47   name: gac
48 spec:
49   host: ${host}
50   port: 30433
51   type: "action"
52   priority: 1
53 ---
54 version: emco/v2
55 resourceContext:
56   anchor: controllers
57 metadata:
58   name: ovnaction
59 spec:
60   host: ${host}
61   port: 30432
62   type: "action"
63   priority: 1
64 ---
65 version: emco/v2
66 resourceContext:
67   anchor: controllers
68 metadata:
69   name: dtc
70 spec:
71   host: ${host}
72   port: 30448
73   type: "action"
74   priority: 1
75 EOF
76     emcoctl --config ${BUILDDIR}/${cluster_name}-config.yaml apply -f ${BUILDDIR}/${cluster_name}-controllers.yaml
77 }
78
79 function unregister_emco_controllers {
80     local -r cluster_name=${CLUSTER_NAME:-icn}
81     emcoctl --config ${BUILDDIR}/${cluster_name}-config.yaml delete -f ${BUILDDIR}/${cluster_name}-controllers.yaml
82 }
83
84 function is_addon_ready {
85     local -r addon=$1
86     local -r cluster_name=${CLUSTER_NAME:-icn}
87     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
88     if [[ $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get Kustomization/${addon} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
89         return 1
90     fi
91
92     # Additional addon specific checks
93     case ${addon} in
94         "cpu-manager")
95             for node in $(kubectl --kubeconfig=${cluster_kubeconfig} -n kud get pods -l app=cmk-reconcile-ds-all -o jsonpath='{range .items[*]}{.spec.nodeName}{"\n"}{end}' | sort | uniq); do
96                 kubectl --kubeconfig=${cluster_kubeconfig} get cmk-nodereport ${node}
97             done
98             ;;
99         "node-feature-discovery")
100             node_name=$(kubectl --kubeconfig=${cluster_kubeconfig} get nodes -o jsonpath='{range .items[*]}{.metadata.name} {.spec.taints[?(@.effect=="NoSchedule")].effect}{"\n"}{end}' | awk 'NF==1 {print $0;exit}')
101             kernel_version=$(kubectl --kubeconfig=${cluster_kubeconfig} get node ${node_name} -o jsonpath='{.metadata.labels.feature\.node\.kubernetes\.io/kernel-version\.major}')
102             [[ -n ${kernel_version} ]]
103             ;;
104     esac
105 }
106
107 function test_openebs {
108     local -r cluster_name=${CLUSTER_NAME:-icn}
109     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
110     kubectl --kubeconfig=${cluster_kubeconfig} apply -f ${SCRIPTDIR}/openebs-cstor.yaml
111     kubectl --kubeconfig=${cluster_kubeconfig} wait pod hello-cstor-csi-disk-pod --for=condition=Ready --timeout=5m
112     kubectl --kubeconfig=${cluster_kubeconfig} exec -it hello-cstor-csi-disk-pod -- cat /mnt/store/greet.txt
113     kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/openebs-cstor.yaml
114 }
115
116 function is_vm_reachable {
117     local -r cluster_name=${CLUSTER_NAME:-icn}
118     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
119     local -r node_port=$(kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test get service/test-vm-service -o jsonpath='{.spec.ports[].nodePort}')
120     local -r node=$(kubectl -n metal3 get cluster/${cluster_name} -o jsonpath='{.spec.controlPlaneEndpoint.host}')
121     sshpass -p testuser ssh testuser@${node} -p ${node_port} -- uptime
122 }
123
124 function test_kubevirt {
125     local -r cluster_name=${CLUSTER_NAME:-icn}
126     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
127     kubectl --kubeconfig=${cluster_kubeconfig} create ns kubevirt-test
128     kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test create rolebinding psp:privileged-kubevirt-test --clusterrole=psp:privileged --group=system:serviceaccounts:kubevirt-test
129     kubectl --kubeconfig=${cluster_kubeconfig} apply -f ${SCRIPTDIR}/kubevirt-test.yaml
130     WAIT_FOR_TRIES=30
131     wait_for is_vm_reachable
132     kubectl --kubeconfig=${cluster_kubeconfig} delete -f ${SCRIPTDIR}/kubevirt-test.yaml
133     kubectl --kubeconfig=${cluster_kubeconfig} -n kubevirt-test delete rolebinding psp:privileged-kubevirt-test
134     kubectl --kubeconfig=${cluster_kubeconfig} delete ns kubevirt-test
135 }
136
137 function test_addons {
138     install_deps
139
140     # Create a temporary kubeconfig file for the tests
141     local -r cluster_name=${CLUSTER_NAME:-icn}
142     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
143     clusterctl -n metal3 get kubeconfig ${cluster_name} >${cluster_kubeconfig}
144
145     clone_kud_repository
146     # The vFW test in EMCO v21.12 does not use KubeVirt, so patch the
147     # KuD test and continue to use it
148     pushd ${KUDPATH}
149     patch -p1 --forward <${SCRIPTDIR}/plugin_fw_v2.patch || true
150     popd
151
152     pushd ${KUDPATH}/kud/tests
153     failed_tests=""
154     container_runtime=$(KUBECONFIG=${cluster_kubeconfig} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
155     if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
156         # With containerd 1.2.13, the qat test container image fails to unpack.
157         kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:nodus-network nfd:node-feature-discovery sriov-network:sriov-network cmk:cpu-manager"
158     else
159         kud_tests="topology-manager-sriov:sriov-network multus:multus-cni ovn4nfv:nodus-network nfd:node-feature-discovery sriov-network:sriov-network qat:qat-plugin cmk:cpu-manager"
160     fi
161     for kud_test in ${kud_tests}; do
162         addon="${kud_test#*:}"
163         test="${kud_test%:*}"
164         if [[ ! -z ${addon} ]]; then
165             wait_for is_addon_ready ${addon}
166         fi
167         KUBECONFIG=${cluster_kubeconfig} bash ${test}.sh || failed_tests="${failed_tests} ${test}"
168     done
169     # The plugin_fw_v2 test needs the EMCO controllers in place
170     register_emco_controllers
171     DEMO_FOLDER=${KUDPATH}/kud/demo KUBECONFIG=${cluster_kubeconfig} bash plugin_fw_v2.sh --external || failed_tests="${failed_tests} plugin_fw_v2"
172     unregister_emco_controllers
173     popd
174
175     test_openebs || failed_tests="${failed_tests} openebs"
176     test_kubevirt || failed_tests="${failed_tests} kubevirt"
177
178     if [[ ! -z "$failed_tests" ]]; then
179         echo "Test cases failed:${failed_tests}"
180         exit 1
181     fi
182     echo "All test cases passed"
183
184     rm ${cluster_kubeconfig}
185 }
186
187 case $1 in
188     "test") test_addons ;;
189     *) cat <<EOF
190 Usage: $(basename $0) COMMAND
191
192 The "test" command looks for the CLUSTER_NAME variable in the
193 environment (default: "icn").  This should be the name of the
194 Cluster resource to execute the tests in.
195
196 Commands:
197   test  - Test the addons
198 EOF
199        ;;
200 esac