# by vagrant from the jump machine definition) is up.
site = ENV['ICN_SITE'] || 'vm'
+with_jenkins = ENV['WITH_JENKINS'] || false
# Calculate the baremetal network address from the bmcAddress (aka
# IPMI address) specified in the machine pool values. IPMI in the
libvirt.graphics_ip = '0.0.0.0'
libvirt.default_prefix = "#{site}-"
libvirt.cpu_mode = 'host-passthrough'
- libvirt.cpus = 8
- libvirt.memory = 24576
+ if with_jenkins
+ # With Jenkins and nested VMs increase cpus, memory
+ libvirt.cpus = 32
+ libvirt.memory = 65536
+ else
+ libvirt.cpus = 8
+ libvirt.memory = 24576
+ end
libvirt.nested = true
# The ICN baremetal network is the vagrant management network,
DEBIAN_FRONTEND=noninteractive apt-get install -y make
SHELL
m.vm.post_up_message = $post_up_message
+
+ if with_jenkins
+ # Set up a port forward for an instance of Jenkins
+ m.vm.network "forwarded_port", guest: 8080, host: 8080
+ end
end
# Look for any HelmReleases in the site directory with machineName in
`admin/admin`. To override it, please set `jenkins_admin_username` and
`jenkins_admin_password`.
-1. Fetch the source for ICN and Akraino CI management.The ICN jenkins
-job macros require that the the icn and ci-management directories are
-peers.
+1. If deploying the Jenkins server on a machine configured with KuD
+   (i.e. an ICN jump server), first remove the `ANSIBLE_CONFIG` line
+   from `/etc/environment` and log in again.
-** Note: Switch the branch of the repositories below as needed.**
+ ``` shell
+ ./ci.sh cleanup-after-kud
+ logout
+ ```
-``` shell
-git clone https://gerrit.akraino.org/r/icn
-git clone --recursive https://gerrit.akraino.org/r/ci-management
-```
+2. Install the Jenkins server into the machine. If the VM verifier
+ Jenkins job will not be added later, set `WITH_VAGRANT=no` in the
+ environment before running the install step.
-2. Install Jenkins.
+ ``` shell
+ # Use one of the following
+ WITH_VAGRANT=no ./ci.sh install-jenkins
+ ./ci.sh install-jenkins
+ ```
-```shell
-cd icn/ci
-sudo -H ./install_ansible.sh
-sudo -H ansible-playbook site_jenkins.yaml --extra-vars "@vars.yaml" -v
-```
+   After the script has completed, the Jenkins server can be visited
+   at `http://<listen_address>:8080`.
-Once the playbook is successful, we can visit the Jenkins server at
-http://<listen_address>:8080.
+3. Add the Gerrit ssh key as Jenkins credential, so that the jobs can
+ pull code from Gerrit. `JENKINS_SSH_PRIVATE_KEY` is the path to the
+ private key file of the `icn.jenkins` Gerrit account.
-## What to do next
+ ``` shell
+ JENKINS_SSH_PRIVATE_KEY="path/to/icn.jenkins/id_rsa"
+ ./ci.sh install-credentials
+ ```
-1. Add the gerrit ssh key as Jenkins credential, so that our jobs can
-pull code from the gerrit. The credential ID field must be
-`jenkins-ssh`, as this is hard coded in the jobs. The type should be
-private key. The user name is the gerrit account name.
+ To use a different account, edit `git-url` in `jjb/defaults.yaml`
+ with the account name and execute the above command with the
+ username specified.
-2. To push the logs to Akraino Nexus server, we need to create the
-authentication file for lftools. The file should be owned by Jenkins
-user. The file path is `/var/lib/jenkins/.netrc` and the content
-should be one line `machine nexus.akraino.org login the_name password
-the_pass`
+ ``` shell
+ JENKINS_SSH_USERNAME="username"
+ JENKINS_SSH_PRIVATE_KEY="path/to/username/id_rsa"
+ ./ci.sh install-credentials
+ ```
-3. The last step is to deploy our CD jobs by jenkins-job-builder tool.
+4. To push the logs to the Akraino Nexus server, we need to create
+   the lftools authentication file (`/var/lib/jenkins/.netrc`).
-Basic Jenkins Job Builder (JJB) configuration using admin/admin
-credentials.
+ ``` shell
+ JENKINS_LFTOOLS_USERNAME="username"
+ JENKINS_LFTOOLS_PASSWORD="password"
+ ./ci.sh install-lftools-credentials
+ ```
-``` shell
-mkdir -p ~/.config/jenkins_jobs
-cat << EOF | tee ~/.config/jenkins_jobs/jenkins_jobs.ini
-[job_builder]
-ignore_cache=True
-keep_descriptions=False
-recursive=True
-retain_anchors=True
-update=jobs
-
-[jenkins]
-user=admin
-password=admin
-url=http://localhost:8080
-EOF
-```
+5. Add the ICN Jenkins jobs to Jenkins. The script adds only a subset
+ of the available jobs; review the script for information about
+ other jobs.
-Install jenkins-job-builder.
+ ``` shell
+ ./ci.sh update-jobs
+ ```
-``` shell
-sudo -H pip3 install jenkins-job-builder
-```
+## Job specific instructions
+
+### icn-bluval
-Install the job into Jenkins. The test command displays the output of
-the job builder that will be installed into Jenkins; it is optional.
+The Bluval job requires that Jenkins ssh into the cluster control
+plane. The script can be used to create a new keypair for the
+`jenkins` user and install the credentials into an existing cluster.
+
+For example, where the control plane endpoint is at `192.168.151.254`
+and there exists `/home/ubuntu/.kube/config`:
``` shell
-jenkins-jobs test ci-management/jjb:icn/ci/jjb icn-master-verify
-jenkins-jobs update ci-management/jjb:icn/ci/jjb icn-master-verify
+CLUSTER_MASTER_IP=192.168.151.254
+CLUSTER_SSH_USER=root
+./ci.sh install-jenkins-id
```
+
+The same values of `CLUSTER_MASTER_IP` and `CLUSTER_SSH_USER` should
+be provided to the icn-bluval job in Jenkins. Note that
+`CLUSTER_SSH_USER` must be `root` for the Bluval Lynis testing to
+succeed.
--- /dev/null
+#!/usr/bin/env bash
+set -eux -o pipefail
+
+SCRIPT_DIR="$(readlink -f $(dirname ${BASH_SOURCE[0]}))"
+
+WITH_VAGRANT="${WITH_VAGRANT:-yes}"
+JENKINS_HOSTNAME="${JENKINS_HOSTNAME:-localhost}"
+JENKINS_ADMIN_USERNAME="${JENKINS_ADMIN_USERNAME:-admin}"
+JENKINS_ADMIN_PASSWORD="${JENKINS_ADMIN_PASSWORD:-admin}"
+JENKINS_LFTOOLS_USERNAME="${JENKINS_LFTOOLS_USERNAME:-icn.jenkins}"
+JENKINS_SSH_USERNAME="${JENKINS_SSH_USERNAME:-icn.jenkins}"
+CLUSTER_MASTER_IP="${CLUSTER_MASTER_IP:-localhost}"
+CLUSTER_SSH_USER="${CLUSTER_SSH_USER:-root}"
+
+BUILD_DIR=${SCRIPT_DIR/icn/icn/build}
+mkdir -p ${BUILD_DIR}
+
+ICN_DIR="${SCRIPT_DIR}/.."
+# The ci-management repo must be a sibling of the icn repo
+CI_MANAGEMENT_DIR="${ICN_DIR}/../ci-management"
+JJB_PATH="${CI_MANAGEMENT_DIR}/jjb:${ICN_DIR}/ci/jjb"
+
+# Workaround for KuD installer which messes with /etc/environment
+function cleanup_after_kud {
+ sed -i -e '/ANSIBLE_CONFIG/d' /etc/environment
+}
+
+function install_jenkins {
+ # Prerequisites
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update
+ apt-get install -y ca-certificates python3-pip
+ pip3 install ansible jenkins-job-builder lftools
+
+ # Jenkins
+ ansible-galaxy install -r ${SCRIPT_DIR}/galaxy-requirements.yaml --roles-path /etc/ansible/roles
+ ansible-playbook ${SCRIPT_DIR}/site_jenkins.yaml --extra-vars "@${SCRIPT_DIR}/vars.yaml" -v
+
+ # The Bluval job requires docker access
+ usermod -aG docker jenkins
+
+ # Restart Jenkins to take into account any group changes above
+ systemctl restart jenkins
+
+ # Jenkins jobs
+ mkdir -p ${HOME}/.config/jenkins_jobs
+ cp ${SCRIPT_DIR}/jenkins_jobs.ini ${HOME}/.config/jenkins_jobs/jenkins_jobs.ini
+ git clone --recursive https://gerrit.akraino.org/r/ci-management "${CI_MANAGEMENT_DIR}"
+}
+
+function install_credentials {
+ if [[ ! -f ${JENKINS_SSH_PRIVATE_KEY} ]]; then
+ echo "JENKINS_SSH_PRIVATE_KEY must be set to the path of the private key of ${JENKINS_SSH_USERNAME}"
+ exit 1
+ fi
+
+ wget http://${JENKINS_HOSTNAME}:8080/jnlpJars/jenkins-cli.jar -O ${BUILD_DIR}/jenkins-cli.jar
+ cat <<EOF >${BUILD_DIR}/jenkins-ssh-credential.xml
+<com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey plugin="ssh-credentials@1.13">
+ <scope>GLOBAL</scope>
+ <id>jenkins-ssh</id>
+ <description></description>
+ <username>${JENKINS_SSH_USERNAME}</username>
+ <privateKeySource class="com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey\$DirectEntryPrivateKeySource">
+ <privateKey>$(cat ${JENKINS_SSH_PRIVATE_KEY})</privateKey>
+ </privateKeySource>
+</com.cloudbees.jenkins.plugins.sshcredentials.impl.BasicSSHUserPrivateKey>
+EOF
+ java -jar ${BUILD_DIR}/jenkins-cli.jar -auth ${JENKINS_ADMIN_USERNAME}:${JENKINS_ADMIN_PASSWORD} -s http://${JENKINS_HOSTNAME}:8080/ create-credentials-by-xml system::system::jenkins _ <${BUILD_DIR}/jenkins-ssh-credential.xml
+}
+
+function install_lftools_credentials {
+ cat <<EOF >/var/lib/jenkins/.netrc
+machine nexus.akraino.org login ${JENKINS_LFTOOLS_USERNAME} password ${JENKINS_LFTOOLS_PASSWORD}
+EOF
+ chown jenkins:jenkins /var/lib/jenkins/.netrc
+ chmod 0600 /var/lib/jenkins/.netrc
+}
+
+function update_jobs {
+ # TODO Find a better way to do this without touching files under
+ # source control
+ sed -i -e "s!git-url: .*!git-url: 'ssh://${JENKINS_SSH_USERNAME}@gerrit.akraino.org:29418'!" ${SCRIPT_DIR}/jjb/defaults.yaml
+ sed -i -e "s!bluval-cluster-master-ip: .*!bluval-cluster-master-ip: ${CLUSTER_MASTER_IP}!" ${SCRIPT_DIR}/jjb/defaults.yaml
+ sed -i -e "s!bluval-cluster-ssh-user: .*!bluval-cluster-ssh-user: ${CLUSTER_SSH_USER}!" ${SCRIPT_DIR}/jjb/defaults.yaml
+
+ # This will create all 344 jobs:
+ # jenkins-jobs update ${JJB_PATH}:${ICN_DIR}/ci/jjb/project.yaml
+
+ # These are the ICN jobs we are interested in (note intentional misspelling of bm_verifer):
+ if [[ ${WITH_VAGRANT} == "yes" ]]; then
+ jenkins-jobs update ${JJB_PATH} icn-master-vagrant-verify-verifier
+ fi
+ jenkins-jobs update ${JJB_PATH} icn-master-bm-verify-bm_verifer
+ jenkins-jobs update ${JJB_PATH} icn-bluval-daily-master
+ #jenkins-jobs update ${JJB_PATH} icn-master-verify
+
+ # These are additional ICN jobs:
+ # if [[ ${WITH_VAGRANT} == "yes" ]]; then
+ # jenkins-jobs update ${JJB_PATH} icn-master-vagrant-verify-verify_nestedk8s
+ # fi
+ # jenkins-jobs update ${JJB_PATH} icn-master-bm-verify-bm_verify_nestedk8s
+ # jenkins-jobs update ${JJB_PATH} icn-master-bm-verify-kud_bm_verifier
+}
+
+function install_jenkins_id {
+ # Create a new key if one does not exist
+ ssh-keygen -q -t rsa -N '' -f /var/lib/jenkins/jenkins-rsa -C jenkins@$(hostname) <<<n >/dev/null 2>&1 || true
+ ssh-copy-id -i /var/lib/jenkins/jenkins-rsa -f ${CLUSTER_SSH_USER}@${CLUSTER_MASTER_IP}
+ chown jenkins:jenkins /var/lib/jenkins/jenkins-rsa*
+ chmod 600 /var/lib/jenkins/jenkins-rsa*
+}
+
+case $1 in
+ "cleanup-after-kud") cleanup_after_kud ;;
+ "install-credentials") install_credentials ;;
+ "install-jenkins") install_jenkins ;;
+ "install-jenkins-id") install_jenkins_id ;;
+ "install-lftools-credentials") install_lftools_credentials ;;
+ "update-jobs") update_jobs ;;
+ *) cat <<EOF
+Usage: $(basename $0) COMMAND
+
+Commands:
+ cleanup-after-kud - Cleanup after KuD install
+ install-credentials - Install credentials into Jenkins
+ install-jenkins - Install Jenkins
+ install-jenkins-id - Install Jenkins ID into test cluster
+ install-lftools-credentials - Install lftools credentials
+ update-jobs - Install or update ICN jobs into Jenkins
+
+Environment variables used by the commands:
+ WITH_VAGRANT=[yes|no] - Install components needed to run the VM
+ verifier job
+ JENKINS_HOSTNAME - jenkins_hostname in vars.yaml
+ JENKINS_ADMIN_USERNAME - jenkins_admin_username in vars.yaml
+ JENKINS_ADMIN_PASSWORD - jenkins_admin_password in vars.yaml
+ JENKINS_LFTOOLS_USERNAME - The .netrc login
+ JENKINS_LFTOOLS_PASSWORD - The .netrc password
+ JENKINS_SSH_USERNAME - The gerrit account username
+ JENKINS_SSH_PRIVATE_KEY - The gerrit account private key file
+ CLUSTER_MASTER_IP - The cluster under test
+ CLUSTER_SSH_USER - The cluster account username
+EOF
+ ;;
+esac
--- /dev/null
+- src: geerlingguy.jenkins
+ version: 4.3.0
+- src: geerlingguy.java
+ version: 2.0.0
--- /dev/null
+[job_builder]
+ignore_cache=True
+keep_descriptions=False
+recursive=True
+retain_anchors=True
+update=jobs
+
+[jenkins]
+user=admin
+password=admin
+url=http://localhost:8080
unstable-on-warning: false
fail-on-error: true
show-graphs: true
- - postbuildscript:
- mark-unstable-if-failed: true
- builders:
- - build-on:
- - SUCCESS
- - UNSTABLE
- - FAILURE
- build-steps:
- - shell: !include-raw-escape:
- shell/bluval-postbuild.sh
disabled: false
validation_lab:
- intel
- build-node: master
+ build-node: built-in
jobs:
- icn-bluval-run-daily-tests
+++ /dev/null
-#!/bin/bash
-set -e
-set -o errexit
-set -o pipefail
-
-echo "[ICN] Uninstalling EMCO k8s"
-cd k8s/kud/hosting_providers/vagrant
-version=$(grep "kubespray_version" ../../deployment_infra/playbooks/kud-vars.yml | awk -F ': ' '{print $2}')
-ansible-playbook -i inventory/hosts.ini /opt/kubespray-${version}/reset.yml --become --become-user=root -e reset_confirmation=yes
-
-echo "[ICN] Purging Docker fully"
-cat << EOF | tee purge-docker.yml
----
-- hosts: all
- gather_facts: True
- tasks:
- - name: reset | remove all docker images
- shell: "/usr/bin/docker image ls -a -q | xargs -r /usr/bin/docker rmi -f"
- retries: 2
- delay: 5
- tags:
- - docker
- - name: reset | remove docker itself
- shell: "apt-get purge docker-* -y --allow-change-held-packages"
- retries: 2
- delay: 30
- tags:
- - docker
-EOF
-ansible-playbook -i inventory/hosts.ini purge-docker.yml --become --become-user=root
-#!/bin/bash
-set -e
-set -o errexit
-set -o pipefail
-
-echo "[ICN] Downloading EMCO k8s"
-git clone "https://gerrit.onap.org/r/multicloud/k8s"
-cp ~/aio.sh k8s/kud/hosting_providers/baremetal/aio.sh
-cp ~/installer.sh k8s/kud/hosting_providers/vagrant/installer.sh
-
-echo "[ICN] Installing EMCO k8s"
-sudo chown root:root /var/lib/jenkins/.netrc
-sudo k8s/kud/hosting_providers/baremetal/aio.sh
-sudo chown jenkins:jenkins /var/lib/jenkins/.netrc
-sudo chown jenkins:jenkins -R /var/lib/jenkins/workspace/icn-bluval-daily-master/k8s/kud/hosting_providers/vagrant
-# the .netrc chown is a temporary workaround, needs to be fixed in multicloud-k8s
-sleep 5
-
-echo "[ICN] Patching EMCO k8s security vulnerabilities"
-kubectl replace -f - << EOF
-apiVersion: rbac.authorization.k8s.io/v1
-kind: ClusterRole
-metadata:
- name: pod-reader
- annotations:
- rbac.authorization.kubernetes.io/autoupdate: "false"
- labels:
- kubernetes.io/bootstrapping: rbac-defaults
- name: system:public-info-viewer
-rules:
-- nonResourceURLs:
- - /livez
- - /readyz
- - /healthz
- verbs:
- - get
-EOF
-kubectl replace -f - << EOF
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: default
-automountServiceAccountToken: false
+#!/usr/bin/env bash
+
+# Ensure we fail the job if any steps fail
+# Disable 'globbing'
+set -eux -o pipefail
+
+SCRIPT_DIR="$(readlink -f $(dirname ${BASH_SOURCE[0]}))"
+
+echo "[ICN] Downloading ICN"
+git clone "https://gerrit.akraino.org/r/icn"
+
+echo "[ICN] Bringing up test cluster"
+function clean_vm {
+ pushd ${SCRIPT_DIR}/icn
+ vagrant destroy -f
+ popd
+}
+trap clean_vm EXIT
+pushd icn
+# TODO Improve VM performance by only using cores on the same node
+#sed -i -e '/^\s\+libvirt.cpus/!b' -e "h;s/\S.*/libvirt.cpuset = '0-21,44-65'/;H;g" Vagrantfile
+vagrant destroy -f
+vagrant up --no-parallel
+vagrant ssh jump -c "
+set -exuf
+cd /icn
+sudo su -c 'make jump_server vm_cluster'
+"
+popd
+
+echo "[ICN] Installing jenkins identity into test cluster"
+cp ${SCRIPT_DIR}/icn/deploy/site/vm/id_rsa site-vm-rsa
+chmod 0600 site-vm-rsa
+ssh-keygen -f ${CLUSTER_SSH_KEY} -y > ${CLUSTER_SSH_KEY}.pub
+ssh-copy-id -i ${CLUSTER_SSH_KEY} -f ${CLUSTER_SSH_USER}@${CLUSTER_MASTER_IP} -o IdentityFile=site-vm-rsa -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null
+
+echo "[ICN] Patching kube-hunter image location"
+cat <<'EOF' | patch -p1
+diff --git a/tests/variables.yaml b/tests/variables.yaml
+index fa3fe71..c54f37f 100644
+--- a/tests/variables.yaml
++++ b/tests/variables.yaml
+@@ -82,3 +82,7 @@ dns_domain: cluster.local # cluster's DNS domain
+ # NONE, WARN, INFO, DEBUG, and TRACE.
+ # Default is INFO
+ loglevel: INFO
++
++kube_hunter:
++ path: 'aquasec'
++ name: 'kube-hunter:edge'
EOF
echo "[ICN] Downloading run_bluval.sh from upstream ci-management"
}}
trap clean_vm EXIT
+# TODO Improve VM performance by only using cores on the same node
+#sed -i -e '/^\s\+libvirt.cpus/!b' -e "h;s/\S.*/libvirt.cpuset = '0-21,44-65'/;H;g" Vagrantfile
+
vagrant destroy -f
vagrant up --no-parallel
-vagrant ssh -c "
+vagrant ssh jump -c "
set -exuf
cd /icn
sudo su -c 'make {target}'
- name: install and configure vagrant
block:
- - name: install libvirt and vagrant
+ - name: install vagrant dependencies
package:
name: "{{ item }}"
with_items:
- - libvirt-bin
- - vagrant
+ - libvirt-daemon-system
+ - libvirt-clients
+ - libvirt-dev
+ - nfs-kernel-server
+
+ - name: install vagrant repository key
+ apt_key:
+ url: https://apt.releases.hashicorp.com/gpg
+
+ - name: add vagrant repository
+ apt_repository:
+ repo: deb https://apt.releases.hashicorp.com {{ ansible_distribution_release | lower }} main
+
+ - name: install vagrant
+ package:
+ name: vagrant
- name: add jenkins user to libvirt and vagrant group
user:
name: jenkins
append: yes
groups:
+ - kvm
- libvirt
- name: setup vagrant-libvirt plugin