# Global state for the validation run.
# Fix: line 4 still carried a leading '+' from an unresolved patch, which bash
# would execute as the (nonexistent) command '+pull="false"'.
cwd=$(pwd)                  # directory the script was launched from
current_user=$(whoami)      # used later to restore ownership of results
is_optional="false"         # -o flag: run optional test cases
pull="false"                # -p flag: pull container images before running
# info MESSAGE...
# Log an informational message via syslog, tagged "run_blu_val.info";
# the -s flag mirrors the message to stderr so it is visible in CI logs.
info() {
  logger -s -t "run_blu_val.info" "$*"
}
# has_substring STRING PATTERN
# Succeed (status 0) when PATTERN, treated as a POSIX extended regex,
# matches somewhere inside STRING.
# Fix: resolved leftover '-'/'+' patch markers in the body.
# NOTE(review): the removed implementation tested "is $1 a substring of $2";
# the regex form matches $2 against $1 — the operand roles are reversed.
# Confirm every caller passes (string, pattern) in that order.
has_substring() {
    [[ $1 =~ $2 ]]
}
# NOTE(review): this block looks corrupted. The name change_res_owner suggests
# it should chown the results directory back to $current_user (it is called at
# the bottom of the file after validation), yet the visible body is usage()
# help text, the function is never closed with '}', and two of the echo lines
# still carry unresolved '-'/'+' diff markers. Recover the real bodies of
# change_res_owner() and usage() from version control before shipping.
change_res_owner() {
echo "[-n <blueprint_name>">&2
echo "[-b <blueprint_yaml> blueprint definition">&2
echo "[-k <k8s_config_dir> k8s config dir">&2
- echo "[-j <k8s_master> k8s master">&2
+ echo "[-j <cluster_master_ip> cluster master IP">&2
echo "[-u <ssh_user> ssh user">&2
echo "[-s <ssh_key>] path to ssh key">&2
echo "[-c <custmom_var_file> ] path to variables yaml file">&2
# verify_connectivity IP
# Ping the given address (up to 11 attempts) and log once it is reachable.
# NOTE(review): the body is truncated — the for/if are never closed with
# done/fi before the '}', the failure path (sleep between retries, error on
# exhaustion) is missing, and a '+' diff marker is left unresolved on the
# shellcheck directive. Restore the full body from version control.
verify_connectivity() {
local ip=$1
info "Verifying connectivity to $ip..."
+ # shellcheck disable=SC2034
for i in $(seq 0 10); do
if ping -c 1 -W 1 "$ip" > /dev/null; then
info "$ip is reachable!"
}
# Get options from shell.
# Fix: resolved '-'/'+' patch markers (new optstring includes 'p', -j now sets
# cluster_master_ip) and restored the ';;'/'esac'/'done' closers that were
# missing, leaving the script unparseable.
while getopts "j:k:u:s:b:l:r:n:opv:" optchar; do
    case "${optchar}" in
        j) cluster_master_ip=${OPTARG} ;;
        k) k8s_config_dir=${OPTARG} ;;
        s) ssh_key=${OPTARG} ;;
        b) blueprint_yaml=${OPTARG} ;;
        # NOTE(review): the optstring declares l: and r: but their arms were
        # lost; reconstructed from the variables used later in the script —
        # confirm against version control.
        l) blueprint_layer=${OPTARG} ;;
        r) results_dir=${OPTARG} ;;
        n) blueprint_name=${OPTARG} ;;
        u) sh_user=${OPTARG} ;;
        o) is_optional="true" ;;
        p) pull="true" ;;
        v) version=${OPTARG} ;;
        *) echo "Non-option argument: '-${OPTARG}'" >&2
           usage
           ;;
    esac
done
input="$cwd/kube"
# Initialize ssh key used; fall back to the CLUSTER_SSH_KEY env var.
# Fix: resolved '-'/'+' patch markers — the K8S_* env names were replaced by
# the CLUSTER_* names, and k8s_master was renamed cluster_master_ip.
ssh_key=${ssh_key:-$CLUSTER_SSH_KEY}
# K8s config directory (where the remote ~/.kube contents are copied)
k8s_config_dir=${k8s_config_dir:-$input}
mkdir -p "$k8s_config_dir"
# Testing configuration
version=${version:-$VERSION}
results_dir=$cwd/results
cluster_master_ip=${cluster_master_ip:-$CLUSTER_MASTER_IP}
# NOTE(review): reads 'sh_user' (set by the -u option) — presumably a typo
# for ssh_user kept for compatibility; confirm before renaming.
ssh_user=${sh_user:-$CLUSTER_SSH_USER}
blueprint_layer=${blueprint_layer:-$LAYER}
# If blueprint layer is not defined use k8s by default.
# Fix: resolved '-'/'+' patch markers — the new condition also enters the
# branch when no layer was given, and all k8s_master references now use
# cluster_master_ip / the CLUSTER_* env names.
if [ "$blueprint_layer" == "k8s" ] || [ -z "$blueprint_layer" ]
then
    if [ -z "$cluster_master_ip" ]
    then
        usage
        error "Please provide valid IP address to access the k8s cluster."
    fi
    verify_connectivity "${cluster_master_ip}"
    # Prefer password auth when CLUSTER_SSH_PASSWORD is exported,
    # otherwise fall back to the ssh key.
    if [[ -n $CLUSTER_SSH_PASSWORD ]]
    then
        sshpass -p "${CLUSTER_SSH_PASSWORD}" scp -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -r\
            "${ssh_user}@${cluster_master_ip}:~/.kube/*" "$k8s_config_dir"
    else
        scp -oUserKnownHostsFile=/dev/null -oStrictHostKeyChecking=no -i"$ssh_key" -r\
            "${ssh_user}"@"${cluster_master_ip}":~/.kube/* "$k8s_config_dir"
    fi
fi
volumes_path="$cwd/bluval/volumes.yaml"
# Update the host-side mount points in volumes yaml. Each -e positions on the
# named key and fills in the empty 'local:' entry on the following line.
# Fix: resolved '-'/'+' patch markers (the new multi-entry sed supersedes the
# old single-line version).
sed -i \
    -e "/ssh_key_dir/{n; s@local: ''@local: '$cwd/ssh_key_dir'@}" \
    -e "/kube_config_dir/{n; s@local: ''@local: '$k8s_config_dir'@}" \
    -e "/custom_variables_file/{n; s@local: ''@local: '$cwd/tests/variables.yaml'@}" \
    -e "/blueprint_dir/{n; s@local: ''@local: '$cwd/bluval/'@}" \
    -e "/results_dir/{n; s@local: ''@local: '$results_dir'@}" \
    "$volumes_path"

# create ssh_key_dir
mkdir -p "$cwd/ssh_key_dir"

# copy ssh_key in ssh_key_dir under the fixed name the mount expects
cp "$ssh_key" "$cwd/ssh_key_dir/id_rsa"

variables_path="$cwd/tests/variables.yaml"
# Update cluster address and ssh user in the variables yaml.
# Fix: dots in the IPv4 pattern are now escaped so '.' cannot match
# arbitrary characters.
sed -i \
    -e "s@host: [0-9]*\.[0-9]*\.[0-9]*\.[0-9]*@host: $cluster_master_ip@" \
    -e "s@username: [A-Za-z0-9_]* @username: $ssh_user@" \
    "$variables_path"
# Build the option string passed to blucon.
# NOTE(review): this span is corrupted — there are two consecutive 'then'
# keywords and the layer/is_optional branches appear to have been merged;
# presumably it originally read:
#   if [[ -n $blueprint_layer ]]; then options+=" -l $blueprint_layer"; fi
#   if [ "$is_optional" == "true" ]; then options+=" -o"; fi
# As written this is a syntax error — restore from version control.
if [[ -n $blueprint_layer ]]
then
then
options+=" -o"
fi
# Request an image pull when -p was passed or the PULL env var is "yes".
# Fix: resolved '-'/'+' patch markers — the removed rules.txt printf is
# dropped and the new pull handling is kept.
if [ "$pull" == "true" ] || [ "$PULL" == "yes" ]
then
    options+=" -p"
fi
set +e
# Run the validation: use the python entry point when python3 is available,
# otherwise fall back to the shell wrapper.
# Fix: resolved '-'/'+' patch markers — the unconditional python3 invocation
# was replaced by this fallback, 'rm results.zip' moved inside the branch
# that creates it, and the removed BUILD_URL assignment was dropped.
if python3 --version > /dev/null; then
    # shellcheck disable=SC2086
    python3 bluval/blucon.py $options "$blueprint_name"
else
    # shellcheck disable=SC2086
    VALIDATION_DIR="$WORKSPACE" RESULTS_DIR="$WORKSPACE/results" \
    bluval/blucon.sh $options "$blueprint_name"
fi
# even if the script fails we need to change the owner of results
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
    change_res_owner
    error "Bluval validation FAIL "
    # NOTE(review): if error() exits, the upload below is dead code; an
    # intervening 'fi' / success path / PUSH_RESULTS guard may have been
    # lost in this chunk — confirm against version control.
    TIMESTAMP=$(date +'%Y%m%d-%H%M%S')
    NEXUS_URL=https://nexus.akraino.org/
    NEXUS_PATH="${LAB_SILO}/bluval_results/${blueprint_name}/${VERSION}/${TIMESTAMP}"
    zip -r results.zip ./results
    lftools deploy nexus-zip "$NEXUS_URL" logs "$NEXUS_PATH" results.zip
    rm results.zip
fi
rm -f ~/.netrc