- shell: |2-
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum -y install docker-ce docker-ce-cli containerd.io
+ sudo mkdir -p /etc/docker/
+ echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json
sudo systemctl start docker
# Ugly hack to make docker usable for non-root
# (adding to the group would require re-login)
mkdir -p $COMPASS_WORK_DIR
ln -s $COMPASS_WORK_DIR work
-sudo docker rm -f `docker ps | grep compass | cut -f1 -d' '` || true
+sudo docker rm -f `sudo docker ps | grep compass | cut -f1 -d' '` || true
curl -s http://people.linaro.org/~yibo.cai/compass/compass4nfv-arm64-fixup.sh | bash || true
# Create 3 virtual machines
echo -e " - name: host3\n roles:\n - kube_node" >> deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml
# Remove useless code
-sed -i "33,90d" deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
+# The ansible-kubernetes.yml file contains the list of software which will
+# be installed on the VM. For IEC projects, some parts are not essential, so
+# the useless parts will be removed.
+# Delete the contents from line 28 to the end of the file.
+sed -i '28,$d' deploy/adapters/ansible/kubernetes/ansible-kubernetes.yml
export ADAPTER_OS_PATTERN='(?i)ubuntu-16.04.*arm.*'
export OS_VERSION="xenial"
export KUBERNETES_VERSION="v1.13.0"
-if [[ "$NODE_NAME" =~ "-virtual" ]]; then
- export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
- export NETWORK="deploy/conf/vm_environment/network.yml"
- export VIRT_NUMBER=3 VIRT_CPUS=2 VIRT_MEM=4096 VIRT_DISK=50G
-fi
+
+export DHA="deploy/conf/vm_environment/k8-nosdn-nofeature-noha.yml"
+export NETWORK="deploy/conf/vm_environment/network.yml"
+export VIRT_NUMBER=3 VIRT_CPUS=4 VIRT_MEM=12288 VIRT_DISK=50G
./deploy.sh
sed -i "s/10.169.40.106,123456/10.1.0.51,root\"\n\"10.1.0.52,root/g" ./config
HOST_USER=root
export HOST_USER
-sudo ./startup.sh
+./startup.sh
exit 0
K8S_SSH_KEY=${SSH_KEY}
IEC_DIR="/home/${K8S_SSH_USER}/iec"
;;
+ *compass*)
+ K8S_MASTER_IP=${K8S_MASTER_IP_COMPASS}
+ K8S_SSH_USER=${K8S_SSH_USER_COMPASS}
+ K8S_SSH_PASSWORD=${K8S_SSH_PASSWORD_COMPASS}
+ IEC_DIR="/${K8S_SSH_USER_COMPASS}/iec"
+ ;;
*)
echo "Cannot determine installer from ${JOB_NAME}"
exit 1
if [ -n "${K8S_SSH_PASSWORD}" ]
then
- sshpass -o StrictHostKeyChecking=no -p "${K8S_SSH_PASSWORD}" \
+ sshpass -p "${K8S_SSH_PASSWORD}" ssh -o StrictHostKeyChecking=no \
"${K8S_SSH_USER}"@"${K8S_MASTER_IP}" "${INSTALL_CMD}"
elif [ -n "${K8S_SSH_KEY}" ]
then
name: K8S_SSH_KEY
default: ''
description: 'SSH key to connect to the K8s master'
+ - string:
+ name: K8S_MASTER_IP_COMPASS
+ default: '10.1.0.50'
+   description: 'IP address for Compass to connect to the K8s master'
+ - string:
+ name: K8S_SSH_PASSWORD_COMPASS
+ default: 'root'
+   description: 'SSH password for Compass to connect to the K8s master'
+ - string:
+ name: K8S_SSH_USER_COMPASS
+ default: 'root'
+   description: 'SSH username for Compass to connect to the K8s master'
########################
# job templates
set -e -u -x -o pipefail
export PATH=$PATH:/usr/local/go/bin:/usr/local/bin
-KNI_PATH='go/src/gerrit.akraino.org/kni'
+KNI_PATH='src/gerrit.akraino.org/kni/installer'
echo '---> Starting kni installer generation'
-mkdir -p $HOME/${KNI_PATH}/installer
+# Move sources to the correct directory layout inside GOPATH
+mkdir -p ${WORKSPACE}/${KNI_PATH}
export GOPATH=${WORKSPACE}
+mv cmd pkg vendor ${WORKSPACE}/${KNI_PATH}/
# do a host preparation and cleanup
bash utils/prep_host.sh
# now build the openshift-install binary and copy to gopath
make binary 2>&1 | tee ${WORKSPACE}/binary.log
-# then start aws deploy
+# then start libvirt deploy
export MASTER_MEMORY_MB=24000
export CREDENTIALS=file://$(pwd)/akraino-secrets
export BASE_REPO="git::https://gerrit.akraino.org/r/kni/templates"
-export BASE_PATH="libvirt/3-node"
+export BASE_PATH="libvirt/1-node"
export SITE_REPO="git::https://gerrit.akraino.org/r/kni/templates"
export SETTINGS_PATH="libvirt/sample_settings.yaml"
-export INSTALLER_PATH="file://${HOME}/${KNI_PATH}/installer/bin/openshift-install"
+export INSTALLER_PATH="file://${WORKSPACE}/bin/openshift-install"
make deploy 2>&1 | tee ${WORKSPACE}/libvirt_deploy.log
STATUS=$?
- akraino-project-stream-verify
views:
- project-view
+ triggers:
+ - 'starlingx-weekly-master-trigger'
+
+- trigger:
+ name: 'starlingx-weekly-master-trigger'
+ triggers:
+ - timed: '0 11 * * 0'
--- /dev/null
+---
+- project:
+ name: ta-remote-installer
+ project: ta/remote-installer
+ project-name: ta-remote-installer
+ global-settings-file: global-settings
+ build-node: ubuntu1604-docker-8c-8g
+ stream: master
+ java-version: openjdk8
+ mvn-version: mvn35
+ mvn-opts: ''
+ mvn-params: ''
+ mvn-settings: ta-settings
+ build-timeout: 60
+ branch: master
+ submodule-recursive: true
+ jobs:
+ - 'akraino-project-stream-docker'
+ - 'akraino-project-stream-stage-docker'
+