# Fail fast: abort on any command failure, including inside pipelines.
# (set -e and set -o errexit are synonyms; both kept from the original.)
set -e
set -o errexit
set -o pipefail

# Jenkins agents install user tools (lftools, etc.) under ~/.local/bin.
export PATH=$PATH:/home/jenkins/.local/bin

cwd=$(pwd)
current_user=$(whoami)

# Defaults for option flags parsed below.
is_optional="false"
pull="false"
# Log an informational message tagged "run_blu_val.info" to syslog,
# echoing it to stderr as well (-s).
info () {
    logger -s -t "run_blu_val.info" "$*"
}
# True when $1 matches the pattern $2.
# NOTE(review): despite the name, $2 is treated as an (unquoted) extended
# regex by =~, not a literal substring — fine for the "snd-" check below.
has_substring() {
    [[ $1 =~ $2 ]]
}
# Restore ownership of the results directory: files are created by root
# inside the validation container, so hand them back to the invoking user.
# No-op when the results directory was never created.
change_res_owner() {
    if [ -d "$results_dir" ]
    then
        sudo chown -R "$current_user" "$results_dir"
    fi
}
# Print the command-line help to stderr.
usage() {
    echo "usage: $0" >&2
    echo "[-n <blueprint_name> ]" >&2
    echo "[-b <blueprint_yaml> ] blueprint definition" >&2
    echo "[-k <k8s_config_dir> ] k8s config dir" >&2
    echo "[-j <cluster_master_ip> ] cluster master IP" >&2
    echo "[-u <ssh_user> ] ssh user" >&2
    echo "[-p <ssh_password> ] ssh password" >&2
    echo "[-s <ssh_key> ] path to ssh key" >&2
    # typo fixed: "custmom" -> "custom"
    echo "[-c <custom_var_file> ] path to variables yaml file" >&2
    echo "[-l <layer> ] blueprint layer" >&2
    echo "[-P ] pull docker images" >&2
    echo "[-o ] run optional tests" >&2
    echo "[-v <version> ] version" >&2
}
verify_connectivity() {
local ip=$1
info "Verifying connectivity to $ip..."
+ # shellcheck disable=SC2034
for i in $(seq 0 10); do
if ping -c 1 -W 1 "$ip" > /dev/null; then
info "$ip is reachable!"
}
# Get options from shell.
# NOTE(review): 'r:' stays in the optstring so callers still passing -r do
# not break, but its value is ignored — results_dir is fixed to $cwd/results.
while getopts "j:k:u:p:s:b:l:r:n:oPv:" optchar; do
    case "${optchar}" in
        j) cluster_master_ip=${OPTARG} ;;
        k) k8s_config_dir=${OPTARG} ;;
        u) sh_user=${OPTARG} ;;
        p) ssh_password=${OPTARG} ;;
        s) ssh_key=${OPTARG} ;;
        b) blueprint_yaml=${OPTARG} ;;
        l) blueprint_layer=${OPTARG} ;;
        r) ;;  # ignored for backward compatibility
        n) blueprint_name=${OPTARG} ;;
        o) is_optional="true" ;;
        P) pull="true" ;;
        v) version=${OPTARG} ;;
        *) echo "Non-option argument: '-${OPTARG}'" >&2
           usage
           ;;
    esac
done
# Blueprint name is mandatory
+blueprint_name=${blueprint_name:-$BLUEPRINT}
if [ -z "$blueprint_name" ]
then
usage
input="$cwd/kube"
# Initialize ssh key used (CLI option wins, then environment).
ssh_key=${ssh_key:-$CLUSTER_SSH_KEY}
# K8s config directory
k8s_config_dir=${k8s_config_dir:-$input}
mkdir -p "$k8s_config_dir"
# Testing configuration — each value falls back to a Jenkins-provided
# environment variable when not supplied as an option.
version=${version:-$VERSION}
# results_dir is no longer configurable via -r; always under the workspace.
results_dir=$cwd/results
cluster_master_ip=${cluster_master_ip:-$CLUSTER_MASTER_IP}
# NOTE: getopts stores -u into 'sh_user'; normalized to ssh_user here.
ssh_user=${sh_user:-$CLUSTER_SSH_USER}
ssh_password=${ssh_password:-$CLUSTER_SSH_PASSWORD}
blueprint_layer=${blueprint_layer:-$LAYER}
# The k8s layer (also the default when no layer is given) needs the
# kubeconfig fetched from the cluster master node.
if [ "$blueprint_layer" == "k8s" ] || [ -z "$blueprint_layer" ]
then
    if [ -z "$cluster_master_ip" ]
    then
        usage
        error "Please provide valid IP address to access the k8s cluster."
    fi
    verify_connectivity "${cluster_master_ip}"
    if [[ -n ${ssh_password} ]]
    then
        sshpass -p "${ssh_password}" scp -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -r \
            "${ssh_user}@${cluster_master_ip}:~/.kube/*" "$k8s_config_dir"
    else
        # Remote path quoted (as in the sshpass branch) so the glob is
        # expanded on the remote side, never by the local shell.
        scp -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -i "$ssh_key" -r \
            "${ssh_user}@${cluster_master_ip}:~/.kube/*" "$k8s_config_dir"
    fi
fi
# An explicit blueprint definition overrides the one shipped in ./bluval.
if [[ -n $blueprint_yaml ]]
then
    cp "$blueprint_yaml" ./bluval/
fi
# Directory holding the ssh key that gets mounted into the container.
mkdir -p "$cwd/ssh_key_dir"

volumes_path="$cwd/bluval/volumes.yaml"
# Point each container volume mapping ("local: ''" on the line after the
# volume name) at the corresponding local directory.
sed -i \
    -e "/ssh_key_dir/{n; s@local: ''@local: '$cwd/ssh_key_dir'@}" \
    -e "/kube_config_dir/{n; s@local: ''@local: '$k8s_config_dir'@}" \
    -e "/custom_variables_file/{n; s@local: ''@local: '$cwd/tests/variables.yaml'@}" \
    -e "/blueprint_dir/{n; s@local: ''@local: '$cwd/bluval/'@}" \
    -e "/results_dir/{n; s@local: ''@local: '$results_dir'@}" \
    "$volumes_path"

if [ -n "$ssh_key" ]
then
    # Paths quoted (SC2086): key path or workspace may contain spaces.
    cp "$ssh_key" "$cwd/ssh_key_dir/id_rsa"
    # Path of the key as seen from inside the container.
    ssh_keyfile=/root/.ssh/id_rsa
fi
variables_path="$cwd/tests/variables.yaml"
# Rewrite the cluster connection details in the variables yaml.
# Dots in the IP pattern are escaped so "host:" only matches dotted quads.
# NOTE(review): the username/password/ssh_keyfile patterns require a
# trailing space after the current value and drop it on replacement —
# confirm the yaml template matches; replacement values are interpolated
# into the sed program unescaped (a '@' in a password would break it).
sed -i \
    -e "s@host: [0-9]*\.[0-9]*\.[0-9]*\.[0-9]*@host: $cluster_master_ip@" \
    -e "s@username: [A-Za-z0-9_]* @username: $ssh_user@" \
    -e "s@password: [A-Za-z0-9_]* @password: $ssh_password@" \
    -e "s@ssh_keyfile: [A-Za-z0-9_]* @ssh_keyfile: $ssh_keyfile@" \
    "$variables_path"
if [[ -n $blueprint_layer ]]
then
then
options+=" -o"
fi
if [ "$pull" == "true" ] || [ "$PULL" == "yes" ]
then
    options+=" -P"
fi

# Run the validation. The exit status is captured explicitly instead of
# relying on $? surviving past intervening lines (shellcheck SC2181).
set +e
if python3 --version > /dev/null 2>&1; then
    # shellcheck disable=SC2086
    python3 bluval/blucon.py $options "$blueprint_name"
    blucon_rc=$?
else
    # No python3 on the agent: fall back to the containerized runner.
    # shellcheck disable=SC2086
    VALIDATION_DIR="$WORKSPACE" RESULTS_DIR="$WORKSPACE/results" \
        bluval/blucon.sh $options "$blueprint_name"
    blucon_rc=$?
fi

# even if the validation fails we need to change the owner of results
if [ "$blucon_rc" -ne 0 ]; then
    change_res_owner
    error "Bluval validation FAIL "
fi
set -e
# On success, also hand the results back to the invoking user.
change_res_owner

if has_substring "$NODE_NAME" "snd-"
then
    echo "In sandbox the logs are not pushed"
else
    # Zip the results and push them to the Nexus log server.
    TIMESTAMP=$(date +'%Y%m%d-%H%M%S')
    NEXUS_URL=https://nexus.akraino.org/
    NEXUS_PATH="${LAB_SILO}/bluval_results/${blueprint_name}/${VERSION}/${TIMESTAMP}"
    zip -r results.zip ./results
    lftools deploy nexus-zip "$NEXUS_URL" logs "$NEXUS_PATH" results.zip
    rm results.zip
fi

# Drop the credentials file written for lftools.
rm -f ~/.netrc