ENV:=$(CURDIR)/env
BMDIR:=$(CURDIR)/env/metal3
METAL3DIR:=$(CURDIR)/deploy/metal3/scripts
+METAL3VMDIR:=$(CURDIR)/deploy/metal3-vm
BPA_OPERATOR:=$(CURDIR)/cmd/bpa-operator/
KUD_PATH:=$(CURDIR)/deploy/kud
BPA_E2E_SETUP:=https://raw.githubusercontent.com/onap/multicloud-k8s/master/kud/hosting_providers/vagrant/setup.sh
bm_all: bm_preinstall bm_install
+kud_bm_deploy_mini:
+ pushd $(KUD_PATH) && ./kud_bm_launch.sh minimal && popd
+
kud_bm_deploy:
- pushd $(KUD_PATH) && ./kud_bm_launch.sh && popd
+ pushd $(KUD_PATH) && ./kud_bm_launch.sh virtlet && popd
+
+metal3_prerequisite:
+ pushd $(METAL3VMDIR) && make bmh_install && popd
+
+metal3_vm:
+ pushd $(METAL3VMDIR) && make bmh && popd
bpa_op_install:
pushd $(BPA_OPERATOR) && make docker && make deploy && popd
pushd $(ENV) && ./cd_package_installer.sh && popd
verify_all: prerequisite \
- kud_bm_deploy
+ metal3_prerequisite \
+ kud_bm_deploy_mini \
+ metal3_vm
verifier: verify_all
sudo apt update
sudo apt install -y make
cd /vagrant
-sudo make verifier
+sudo su -c 'make verifier'
"
#!/bin/bash
+set +x
+
LIBDIR="$(dirname "$(dirname "$PWD")")"
source $LIBDIR/env/lib/common.sh
function set_bm_kud {
pushd $DOWNLOAD_PATH/multicloud-k8s/kud/hosting_providers/vagrant/inventory
HOST_IP=${HOST_IP:-$(hostname -I | cut -d ' ' -f 1)}
+ if [ "$1" == "virtlet" ] ; then # "virtlet" is the arg passed by the kud_bm_deploy make target; "virlet" was a dead typo
cat <<EOL > hosts.ini
[all]
$HOSTNAME ansible_ssh_host=${HOST_IP} ansible_ssh_port=22
kube-node
kube-master
EOL
+ else
+ cat <<EOL > hosts.ini
+[all]
+$HOSTNAME ansible_ssh_host=${HOST_IP} ansible_ssh_port=22
+
+[kube-master]
+$HOSTNAME
+
+[kube-node]
+$HOSTNAME
+
+[etcd]
+$HOSTNAME
+
+[k8s-cluster:children]
+kube-node
+kube-master
+EOL
+ fi
popd
}
call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
}
-
get_kud_repo
set_ssh_key
-set_bm_kud
+set_bm_kud $1
kud_install
verifier
--- /dev/null
+logs
+config_*.sh
+!config_example.sh
+master-0*.yaml
+worker-0*.yaml
+*.bk
+*.tmp
+*.swp
--- /dev/null
+#!/usr/bin/env bash
+set +x
+
+OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"')
+if [[ $OS == ubuntu ]]; then
+ # shellcheck disable=SC1091
+ source ubuntu_install_requirements.sh
+else
+ # shellcheck disable=SC1091
+ source centos_install_requirements.sh
+fi
--- /dev/null
+#!/usr/bin/env bash
+set -xe
+
+# shellcheck disable=SC1091
+source lib/logging.sh
+# shellcheck disable=SC1091
+source lib/common.sh
+
+# Generate user ssh key
+if [ ! -f "$HOME/.ssh/id_rsa.pub" ]; then
+ ssh-keygen -f ~/.ssh/id_rsa -P ""
+fi
+
+# root needs a private key to talk to libvirt
+# See tripleo-quickstart-config/roles/virtbmc/tasks/configure-vbmc.yml
+if sudo [ ! -f /root/.ssh/id_rsa_virt_power ]; then
+ sudo ssh-keygen -f /root/.ssh/id_rsa_virt_power -P ""
+ sudo cat /root/.ssh/id_rsa_virt_power.pub | sudo tee -a /root/.ssh/authorized_keys
+fi
+
+ANSIBLE_FORCE_COLOR=true ansible-playbook \
+ -e "working_dir=$WORKING_DIR" \
+ -e "num_masters=$NUM_MASTERS" \
+ -e "num_workers=$NUM_WORKERS" \
+ -e "extradisks=$VM_EXTRADISKS" \
+ -e "virthost=$HOSTNAME" \
+ -e "platform=$NODES_PLATFORM" \
+ -e "manage_baremetal=$MANAGE_BR_BRIDGE" \
+ -i vm-setup/inventory.ini \
+ -b -vvv vm-setup/setup-playbook.yml
+
+# Allow local non-root-user access to libvirt
+# Restart libvirtd service to get the new group membership loaded
+if ! id "$USER" | grep -q libvirt; then
+ sudo usermod -a -G "libvirt" "$USER"
+ sudo systemctl restart libvirtd
+fi
+# Usually virt-manager/virt-install creates this: https://www.redhat.com/archives/libvir-list/2008-August/msg00179.html
+if ! virsh pool-uuid default > /dev/null 2>&1 ; then
+ virsh pool-define /dev/stdin <<EOF
+<pool type='dir'>
+ <name>default</name>
+ <target>
+ <path>/var/lib/libvirt/images</path>
+ </target>
+</pool>
+EOF
+ virsh pool-start default
+ virsh pool-autostart default
+fi
+
+if [[ $OS == ubuntu ]]; then
+ # source ubuntu_bridge_network_configuration.sh
+ # shellcheck disable=SC1091
+ source ubuntu_bridge_network_configuration.sh
+ # shellcheck disable=SC1091
+ source disable_apparmor_driver_libvirtd.sh
+else
+ if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
+ # Adding an IP address in the libvirt definition for this network results in
+ # dnsmasq being run, we don't want that as we have our own dnsmasq, so set
+ # the IP address here
+ if [ ! -e /etc/sysconfig/network-scripts/ifcfg-provisioning ] ; then
+ echo -e "DEVICE=provisioning\nTYPE=Bridge\nONBOOT=yes\nNM_CONTROLLED=no\nBOOTPROTO=static\nIPADDR=172.22.0.1\nNETMASK=255.255.255.0" | sudo dd of=/etc/sysconfig/network-scripts/ifcfg-provisioning
+ fi
+ sudo ifdown provisioning || true
+ sudo ifup provisioning
+
+ # Need to pass the provision interface for bare metal
+ if [ "$PRO_IF" ]; then
+ echo -e "DEVICE=$PRO_IF\nTYPE=Ethernet\nONBOOT=yes\nNM_CONTROLLED=no\nBRIDGE=provisioning" | sudo dd of="/etc/sysconfig/network-scripts/ifcfg-$PRO_IF"
+ sudo ifdown "$PRO_IF" || true
+ sudo ifup "$PRO_IF"
+ fi
+ fi
+
+ if [ "$MANAGE_INT_BRIDGE" == "y" ]; then
+ # Create the baremetal bridge
+ if [ ! -e /etc/sysconfig/network-scripts/ifcfg-baremetal ] ; then
+ echo -e "DEVICE=baremetal\nTYPE=Bridge\nONBOOT=yes\nNM_CONTROLLED=no" | sudo dd of=/etc/sysconfig/network-scripts/ifcfg-baremetal
+ fi
+ sudo ifdown baremetal || true
+ sudo ifup baremetal
+
+    # Add the internal interface to it if requested; this may also be the interface providing
+    # external access, so we need to make sure we maintain dhcp config if it's available
+ if [ "$INT_IF" ]; then
+ echo -e "DEVICE=$INT_IF\nTYPE=Ethernet\nONBOOT=yes\nNM_CONTROLLED=no\nBRIDGE=baremetal" | sudo dd of="/etc/sysconfig/network-scripts/ifcfg-$INT_IF"
+ if sudo nmap --script broadcast-dhcp-discover -e "$INT_IF" | grep "IP Offered" ; then
+ echo -e "\nBOOTPROTO=dhcp\n" | sudo tee -a /etc/sysconfig/network-scripts/ifcfg-baremetal
+ sudo systemctl restart network
+ else
+ sudo systemctl restart network
+ fi
+ fi
+ fi
+
+ # restart the libvirt network so it applies an ip to the bridge
+ if [ "$MANAGE_BR_BRIDGE" == "y" ] ; then
+ sudo virsh net-destroy baremetal
+ sudo virsh net-start baremetal
+ if [ "$INT_IF" ]; then #Need to bring UP the NIC after destroying the libvirt network
+ sudo ifup "$INT_IF"
+ fi
+ fi
+fi
+
+# Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
+for port in 80 5050 6385 ; do
+ if ! sudo iptables -C INPUT -i provisioning -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
+ sudo iptables -I INPUT -i provisioning -p tcp -m tcp --dport $port -j ACCEPT
+ fi
+done
+
+# Allow ipmi to the virtual bmc processes that we just started
+if ! sudo iptables -C INPUT -i baremetal -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
+ sudo iptables -I INPUT -i baremetal -p udp -m udp --dport 6230:6235 -j ACCEPT
+fi
+
+#Allow access to dhcp and tftp server for pxeboot
+for port in 67 69 ; do
+ if ! sudo iptables -C INPUT -i provisioning -p udp --dport $port -j ACCEPT 2>/dev/null ; then
+ sudo iptables -I INPUT -i provisioning -p udp --dport $port -j ACCEPT
+ fi
+done
+
+# Need to route traffic from the provisioning host.
+if [ "$EXT_IF" ]; then
+ sudo iptables -t nat -A POSTROUTING --out-interface "$EXT_IF" -j MASQUERADE
+ sudo iptables -A FORWARD --in-interface baremetal -j ACCEPT
+fi
+
+# Switch NetworkManager to internal DNS
+
+if [[ "$MANAGE_BR_BRIDGE" == "y" && $OS == "centos" ]] ; then
+ sudo mkdir -p /etc/NetworkManager/conf.d/
+ sudo crudini --set /etc/NetworkManager/conf.d/dnsmasq.conf main dns dnsmasq
+ if [ "$ADDN_DNS" ] ; then
+ echo "server=$ADDN_DNS" | sudo tee /etc/NetworkManager/dnsmasq.d/upstream.conf
+ fi
+ if systemctl is-active --quiet NetworkManager; then
+ sudo systemctl reload NetworkManager
+ else
+ sudo systemctl restart NetworkManager
+ fi
+fi
+
+mkdir -p "$IRONIC_DATA_DIR/html/images"
+pushd "$IRONIC_DATA_DIR/html/images"
+if [ ! -f ironic-python-agent.initramfs ]; then
+ curl --insecure --compressed -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+fi
+BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
+BM_IMAGE_URL=${BM_IMAGE_URL:-"https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"}
+if [ ! -f ${BM_IMAGE} ] ; then
+    # -o already names the local output file; the extra -O conflicted with it
+    # (curl warns about more output options than URLs).
+    curl -o ${BM_IMAGE} --insecure --compressed -L ${BM_IMAGE_URL}
+    md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+fi
+popd
+
+for IMAGE_VAR in IRONIC_IMAGE IRONIC_INSPECTOR_IMAGE ; do
+ IMAGE=${!IMAGE_VAR}
+ sudo "${CONTAINER_RUNTIME}" pull "$IMAGE"
+done
+
+for name in ironic ironic-inspector dnsmasq httpd mariadb; do
+ sudo "${CONTAINER_RUNTIME}" ps | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" kill $name
+ sudo "${CONTAINER_RUNTIME}" ps --all | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" rm $name -f
+done
+
+# set password for mariadb
+mariadb_password="$(echo "$(date;hostname)"|sha256sum |cut -c-20)"
+
+
+if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
+ # Remove existing pod
+ if sudo "${CONTAINER_RUNTIME}" pod exists ironic-pod ; then
+ sudo "${CONTAINER_RUNTIME}" pod rm ironic-pod -f
+ fi
+ # Create pod
+ sudo "${CONTAINER_RUNTIME}" pod create -n ironic-pod
+ POD_NAME="--pod ironic-pod"
+else
+ POD_NAME=""
+fi
+
+mkdir -p "$IRONIC_DATA_DIR"
+
+# Start dnsmasq, http, mariadb, and ironic containers using same image
+sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name dnsmasq ${POD_NAME} \
+ -v "$IRONIC_DATA_DIR":/shared --entrypoint /bin/rundnsmasq "${IRONIC_IMAGE}"
+
+sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name httpd ${POD_NAME} \
+ -v "$IRONIC_DATA_DIR":/shared --entrypoint /bin/runhttpd "${IRONIC_IMAGE}"
+
+sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name mariadb ${POD_NAME} \
+ -v "$IRONIC_DATA_DIR":/shared --entrypoint /bin/runmariadb \
+ --env MARIADB_PASSWORD="$mariadb_password" "${IRONIC_IMAGE}"
+
+sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name ironic ${POD_NAME} \
+ --env MARIADB_PASSWORD="$mariadb_password" \
+ -v "$IRONIC_DATA_DIR":/shared "${IRONIC_IMAGE}"
+
+# Start Ironic Inspector
+sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name ironic-inspector ${POD_NAME} "${IRONIC_INSPECTOR_IMAGE}"
--- /dev/null
+#!/bin/bash
+set -xe
+
+# shellcheck disable=SC1091
+source lib/logging.sh
+# shellcheck disable=SC1091
+source lib/common.sh
+
+eval "$(go env)"
+export GOPATH
+
+# Environment variables
+# M3PATH : Path to clone the metal3 dev env repo
+# BMOPATH : Path to clone the baremetal operator repo
+#
+# BMOREPO : Baremetal operator repository URL
+# BMOBRANCH : Baremetal operator repository branch to checkout
+# FORCE_REPO_UPDATE : discard existing directories
+#
+# BMO_RUN_LOCAL : run the baremetal operator locally (not in Kubernetes cluster)
+
+M3PATH="${GOPATH}/src/github.com/metal3-io"
+BMOPATH="${M3PATH}/baremetal-operator"
+
+BMOREPO="${BMOREPO:-https://github.com/metal3-io/baremetal-operator.git}"
+BMOBRANCH="${BMOBRANCH:-3d40caa29dce82878d83aeb7f8dab4dc4a856160}"
+FORCE_REPO_UPDATE="${FORCE_REPO_UPDATE:-false}"
+
+BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}"
+COMPUTE_NODE_PASSWORD="${COMPUTE_NODE_PASSWORD:-mypasswd}"
+BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
+IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
+IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
+
+function clone_repos() {
+ mkdir -p "${M3PATH}"
+ if [[ -d ${BMOPATH} && "${FORCE_REPO_UPDATE}" == "true" ]]; then
+ rm -rf "${BMOPATH}"
+ fi
+ if [ ! -d "${BMOPATH}" ] ; then
+ pushd "${M3PATH}"
+ git clone "${BMOREPO}"
+ popd
+ fi
+ pushd "${BMOPATH}"
+ git checkout "${BMOBRANCH}"
+ git pull -r || true
+ popd
+}
+
+function launch_baremetal_operator() {
+ pushd "${BMOPATH}"
+ if [ "${BMO_RUN_LOCAL}" = true ]; then
+ touch bmo.out.log
+ touch bmo.err.log
+ make deploy
+ kubectl scale deployment metal3-baremetal-operator -n metal3 --replicas=0
+ nohup make run >> bmo.out.log 2>>bmo.err.log &
+ else
+ make deploy
+ fi
+ popd
+}
+
+network_config_files() {
+cat << 'EOF'
+write_files:
+- path: /opt/ironic_net.sh
+ owner: root:root
+ permissions: '0777'
+ content: |
+ #!/usr/bin/env bash
+ set -xe
+ for intf in /sys/class/net/*; do
+ sudo ifconfig `basename $intf` up
+ sudo dhclient -nw `basename $intf`
+ done
+runcmd:
+ - [ /opt/ironic_net.sh ]
+EOF
+}
+
+create_userdata() {
+ name="$1"
+ COMPUTE_NODE_FQDN="$name.akraino.icn.org"
+ printf "#cloud-config\n" > $name-userdata.yaml
+ if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
+ printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >> $name-userdata.yaml
+ printf "\nchpasswd: {expire: False}\n" >> $name-userdata.yaml
+ printf "ssh_pwauth: True\n" >> $name-userdata.yaml
+ fi
+
+ if [ -n "$COMPUTE_NODE_FQDN" ]; then
+ printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >> $name-userdata.yaml
+ printf "\n" >> $name-userdata.yaml
+ fi
+ printf "disable_root: false\n" >> $name-userdata.yaml
+ printf "ssh_authorized_keys:\n - " >> $name-userdata.yaml
+
+ if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
+ yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
+ fi
+
+ cat $HOME/.ssh/id_rsa.pub >> $name-userdata.yaml
+ network_config_files >> $name-userdata.yaml
+ printf "\n" >> $name-userdata.yaml
+}
+
+apply_userdata_credential() {
+ name="$1"
+ cat <<EOF > ./$name-user-data-credential.yaml
+apiVersion: v1
+data:
+ userData: $(base64 -w 0 $name-userdata.yaml)
+kind: Secret
+metadata:
+ name: $name-user-data
+ namespace: metal3
+type: Opaque
+EOF
+ kubectl apply -n metal3 -f $name-user-data-credential.yaml
+}
+
+function make_bm_hosts() {
+ while read -r name address user password mac; do
+ create_userdata $name
+ apply_userdata_credential $name
+ go run "${BMOPATH}"/cmd/make-bm-worker/main.go \
+ -address "$address" \
+ -password "$password" \
+ -user "$user" \
+ -boot-mac "$mac" \
+ "$name" > $name-bm-node.yaml
+ printf " image:" >> $name-bm-node.yaml
+ printf "\n url: ""%s" "${IMAGE_URL}" >> $name-bm-node.yaml
+ printf "\n checksum: ""%s" "${IMAGE_CHECKSUM}" >> $name-bm-node.yaml
+ printf "\n userData:" >> $name-bm-node.yaml
+ printf "\n name: ""%s" "$name""-user-data" >> $name-bm-node.yaml
+ printf "\n namespace: metal3\n" >> $name-bm-node.yaml
+ kubectl apply -f $name-bm-node.yaml -n metal3
+ done
+}
+
+function apply_bm_hosts() {
+ list_nodes | make_bm_hosts
+}
+
+
+clone_repos
+launch_baremetal_operator
+apply_bm_hosts
--- /dev/null
+#!/usr/bin/env bash
+
+set -x
+
+# shellcheck disable=SC1091
+source lib/common.sh
+
+node=0
+declare -i timeout=30
+declare -i interval=60
+
+function check_num_hosts() {
+ while read -r name address user password mac; do
+ ((node+=1))
+ done
+ return $node
+}
+
+# Debug helper: print a counter $1 times (used when manually checking the
+# number of detected bare metal hosts). Fixed "Welcone" -> "Welcome" typo.
+function check_bm_state() {
+    c=1
+    n=$1
+    while [ $c -le $n ]
+    do
+        echo "Welcome $c times"
+        (( c++ ))
+    done
+}
+
+function check_provisioned() {
+ declare -i prev_host_state=0
+ declare -i j=0
+ while read -r name address user password mac; do
+ declare -i current_host_state=0
+ state=$(kubectl get baremetalhosts $name -n metal3 -o json | jq -r '.status.provisioning.state')
+ echo $name":"$state
+
+ if [ $state == "provisioned" ];then
+ current_host_state=1
+ fi
+
+ echo "j:"$j
+ echo "current_host_state":$current_host_state
+ echo "prev_host_state":$prev_host_state
+
+ if [ $j -eq 0 ]; then
+ prev_host_state=$current_host_state
+ ((j+=1))
+ continue
+ fi
+
+ if [ $current_host_state -eq 1 ] && [ $prev_host_state -eq 1 ]; then
+ prev_host_state=1
+ else
+ prev_host_state=0
+ fi
+
+ echo "after:prev_host_state:"$prev_host_state
+ ((j+=1))
+ done
+ return $prev_host_state
+}
+
+# Poll every $interval seconds (up to $timeout tries) until check_provisioned
+# reports (via its exit status) that every bare metal host is "provisioned".
+# Exits 0 on success, 1 when the timeout is exhausted.
+function wait_for_provisioned() {
+    all_bmh_provisioned=1
+    while ((timeout > 0)); do
+        echo "Try $timeout: Wait for $interval seconds to check all bmh state"
+        sleep $interval
+        list_nodes | check_provisioned
+        all_bmh_state=$?
+        if [[ $all_bmh_state -eq $all_bmh_provisioned ]]; then
+            echo "All the bmh state is provisioned - success"
+            exit 0
+        fi
+        ((timeout-=1))
+    done
+    exit 1
+}
+
+function verify_bm_hosts() {
+ #list_nodes | check_num_hosts
+ #nodes=$?
+ #check_bm_state $nodes
+ wait_for_provisioned
+}
+
+verify_bm_hosts
--- /dev/null
+#!/usr/bin/env bash
+set -x
+
+# shellcheck disable=SC1091
+source lib/logging.sh
+# shellcheck disable=SC1091
+source lib/common.sh
+
+# Kill and remove the running ironic containers
+for name in ironic ironic-inspector dnsmasq httpd mariadb; do
+ sudo "${CONTAINER_RUNTIME}" ps | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" kill $name
+ sudo "${CONTAINER_RUNTIME}" ps --all | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" rm $name -f
+done
+
+# Remove existing pod
+if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
+ if sudo "${CONTAINER_RUNTIME}" pod exists ironic-pod ; then
+ sudo "${CONTAINER_RUNTIME}" pod rm ironic-pod -f
+ fi
+fi
+
+# Kill the locally running operators
+if [ "${BMO_RUN_LOCAL}" = true ]; then
+ kill "$(pgrep "operator-sdk")" 2> /dev/null || true
+fi
+if [ "${CAPBM_RUN_LOCAL}" = true ]; then
+ CAPBM_PARENT_PID="$(pgrep -f "go run ./cmd/manager/main.go")"
+ if [[ "${CAPBM_PARENT_PID}" != "" ]]; then
+ CAPBM_GO_PID="$(pgrep -P "${CAPBM_PARENT_PID}" )"
+ kill "${CAPBM_GO_PID}" 2> /dev/null || true
+ fi
+fi
+
+
+ANSIBLE_FORCE_COLOR=true ansible-playbook \
+ -e "working_dir=$WORKING_DIR" \
+ -e "num_masters=$NUM_MASTERS" \
+ -e "num_workers=$NUM_WORKERS" \
+ -e "extradisks=$VM_EXTRADISKS" \
+ -e "virthost=$HOSTNAME" \
+ -e "manage_baremetal=$MANAGE_BR_BRIDGE" \
+ -i vm-setup/inventory.ini \
+ -b -vvv vm-setup/teardown-playbook.yml
+
+sudo rm -rf /etc/NetworkManager/conf.d/dnsmasq.conf
+# There was a bug in this file, it may need to be recreated.
+if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
+ sudo ifdown provisioning || true
+ sudo rm -f /etc/sysconfig/network-scripts/ifcfg-provisioning || true
+fi
+# Leaving this around causes issues when the host is rebooted
+if [ "$MANAGE_BR_BRIDGE" == "y" ]; then
+ sudo ifdown baremetal || true
+ sudo rm -f /etc/sysconfig/network-scripts/ifcfg-baremetal || true
+fi
--- /dev/null
+# Drive the metal3-vm bring-up scripts in order. 'all' includes prerequisite
+# installation; 'bmh' assumes prerequisites are already installed.
+all: bmh_install bmh_configure bmh_launch bmh_verify
+
+bmh: bmh_configure bmh_launch bmh_verify
+
+# Install host packages and tooling.
+bmh_install:
+	./01_install_requirements.sh
+
+# Configure bridges, firewall rules and the Ironic containers on the host.
+bmh_configure:
+	./02_configure_host.sh
+
+# Clone/deploy the baremetal-operator and register the bare metal hosts.
+bmh_launch:
+	./03_launch_mgmt_cluster.sh
+
+# Wait until every registered bare metal host reports the provisioned state.
+bmh_verify:
+	./04_verify.sh
+
+# Tear down containers, VMs and network configuration created above.
+bmh_clean:
+	./05_host_cleanup.sh
+
+.PHONY: all bmh bmh_install bmh_configure bmh_launch bmh_verify bmh_clean
--- /dev/null
+#!/bin/bash
+
+#
+# This is the subnet used on the "baremetal" libvirt network, created as the
+# primary network interface for the virtual bare metalhosts.
+#
+# Default of 192.168.111.0/24 set in lib/common.sh
+#
+#export EXTERNAL_SUBNET="192.168.111.0/24"
+
+#
+# This SSH key will be automatically injected into the provisioned host
+# by the provision_host.sh script.
+#
+# Default of ~/.ssh/id_rsa.pub is set in lib/common.sh
+#
+#export SSH_PUB_KEY=~/.ssh/id_rsa.pub
+
+#
+# Select the Container Runtime, can be "podman" or "docker"
+# Defaults to "podman"
+#
+#export CONTAINER_RUNTIME="podman"
+
+#
+# Set the Baremetal Operator repository to clone
+#
+#export BMOREPO="${BMOREPO:-https://github.com/metal3-io/baremetal-operator.git}"
+
+#
+# Set the Baremetal Operator branch to checkout
+#
+#export BMOBRANCH="${BMOBRANCH:-master}"
+
+#
+# Force deletion of the BMO and CAPBM repositories before cloning them again
+#
+#export FORCE_REPO_UPDATE="${FORCE_REPO_UPDATE:-false}"
+
+#
+# Run a local baremetal operator instead of deploying in Kubernetes
+#
+#export BMO_RUN_LOCAL=true
--- /dev/null
+#!/usr/bin/env bash
+
+selinux="#security_driver = \"selinux\""
+apparmor="security_driver = \"apparmor\""
+none="security_driver = \"none\""
+sudo sed -i "s/$selinux/$none/g" /etc/libvirt/qemu.conf
+sudo sed -i "s/$apparmor/$none/g" /etc/libvirt/qemu.conf
+sudo systemctl restart libvirtd
--- /dev/null
+#!/bin/bash
+
+eval "$(go env)"
+
+SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+USER=`whoami`
+
+# Get variables from the config file
+if [ -z "${CONFIG:-}" ]; then
+ # See if there's a config_$USER.sh in the SCRIPTDIR
+ if [ ! -f ${SCRIPTDIR}/../config_${USER}.sh ]; then
+ cp ${SCRIPTDIR}/../config_example.sh ${SCRIPTDIR}/../config_${USER}.sh
+ echo "Automatically created config_${USER}.sh with default contents."
+ fi
+ CONFIG="${SCRIPTDIR}/../config_${USER}.sh"
+fi
+source $CONFIG
+
+# Set variables
+# Additional DNS
+ADDN_DNS=${ADDN_DNS:-}
+# External interface for routing traffic through the host
+EXT_IF=${EXT_IF:-}
+# Provisioning interface
+PRO_IF=${PRO_IF:-}
+# Does libvirt manage the baremetal bridge (including DNS and DHCP)
+MANAGE_BR_BRIDGE=${MANAGE_BR_BRIDGE:-y}
+# Only manage bridges if is set
+MANAGE_PRO_BRIDGE=${MANAGE_PRO_BRIDGE:-y}
+MANAGE_INT_BRIDGE=${MANAGE_INT_BRIDGE:-y}
+# Internal interface, to bridge virbr0
+INT_IF=${INT_IF:-}
+#Root disk to deploy coreOS - use /dev/sda on BM
+ROOT_DISK_NAME=${ROOT_DISK_NAME-"/dev/sda"}
+#Container runtime
+CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
+
+export EXTERNAL_SUBNET="192.168.111.0/24"
+
+export SSH_PUB_KEY=~/.ssh/id_rsa.pub
+
+FILESYSTEM=${FILESYSTEM:="/"}
+
+WORKING_DIR=${WORKING_DIR:-"/opt/metal3-vm"}
+NODES_FILE=${NODES_FILE:-"${WORKING_DIR}/ironic_nodes.json"}
+NODES_PLATFORM=${NODES_PLATFORM:-"libvirt"}
+
+export NUM_MASTERS=${NUM_MASTERS:-"1"}
+export NUM_WORKERS=${NUM_WORKERS:-"1"}
+export VM_EXTRADISKS=${VM_EXTRADISKS:-"false"}
+
+# Ironic vars
+export IRONIC_IMAGE=${IRONIC_IMAGE:-"quay.io/metal3-io/ironic:master"}
+export IRONIC_INSPECTOR_IMAGE=${IRONIC_INSPECTOR_IMAGE:-"quay.io/metal3-io/ironic-inspector"}
+export IRONIC_DATA_DIR="$WORKING_DIR/ironic"
+
+# Verify requisites/permissions
+# Connect to system libvirt
+export LIBVIRT_DEFAULT_URI=qemu:///system
+if [ "$USER" != "root" -a "${XDG_RUNTIME_DIR:-}" == "/run/user/0" ] ; then
+ echo "Please use a non-root user, WITH a login shell (e.g. su - USER)"
+ exit 1
+fi
+
+# Check if sudo privileges without password
+if ! sudo -n uptime &> /dev/null ; then
+ echo "sudo without password is required"
+ exit 1
+fi
+
+# Check OS
+OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"')
+if [[ ! $OS =~ ^(centos|rhel|ubuntu)$ ]]; then
+ echo "Unsupported OS"
+ exit 1
+fi
+
+# Check CentOS version
+os_version=$(awk -F= '/^VERSION_ID=/ { print $2 }' /etc/os-release | tr -d '"' | cut -f1 -d'.')
+if [[ ${os_version} -ne 7 ]] && [[ ${os_version} -ne 18 ]]; then
+ echo "Required CentOS 7 or RHEL 7 or Ubuntu 18.04"
+ exit 1
+fi
+
+# Check d_type support
+FSTYPE=$(df ${FILESYSTEM} --output=fstype | grep -v Type)
+
+# d_type support check: ext4/btrfs always have it, xfs only when ftype=1.
+case ${FSTYPE} in
+    'ext4'|'btrfs')
+        ;;
+    'xfs')
+        # grep -q prints nothing, so [[ $(... | grep -q ...) ]] was always
+        # false and the check never fired; test the pipeline's exit status.
+        if ! xfs_info ${FILESYSTEM} | grep -q "ftype=1"; then
+            echo "Filesystem not supported"
+            exit 1
+        fi
+        ;;
+    *)
+        echo "Filesystem not supported"
+        exit 1
+        ;;
+esac
+
+if [ ! -d "$WORKING_DIR" ]; then
+ echo "Creating Working Dir"
+ sudo mkdir "$WORKING_DIR"
+ sudo chown "${USER}:${USER}" "$WORKING_DIR"
+ chmod 755 "$WORKING_DIR"
+fi
+
+function list_nodes() {
+ # Includes -machine and -machine-namespace
+ cat $NODES_FILE | \
+ jq '.nodes[] | {
+ name,
+ driver,
+ address:.driver_info.ipmi_address,
+ port:.driver_info.ipmi_port,
+ user:.driver_info.ipmi_username,
+ password:.driver_info.ipmi_password,
+ mac: .ports[0].address
+ } |
+ .name + " " +
+ .driver + "://" + .address + (if .port then ":" + .port else "" end) + " " +
+ .user + " " + .password + " " + .mac' \
+ | sed 's/"//g'
+}
--- /dev/null
+# Log output automatically
+LOGDIR="$(dirname $0)/logs"
+if [ ! -d "$LOGDIR" ]; then
+ mkdir -p "$LOGDIR"
+fi
+LOGFILE="$LOGDIR/$(basename $0 .sh)-$(date +%F-%H%M%S).log"
+echo "Logging to $LOGFILE"
+# Set fd 1 and 2 to write to the log file
+exec 1> >( tee "${LOGFILE}" ) 2>&1
--- /dev/null
+#!/usr/bin/env bash
+
+set -xe
+
+# shellcheck disable=SC1091
+source lib/logging.sh
+# shellcheck disable=SC1091
+source lib/common.sh
+
+if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
+ # Adding an IP address in the libvirt definition for this network results in
+ # dnsmasq being run, we don't want that as we have our own dnsmasq, so set
+ # the IP address here
+ sudo brctl addbr provisioning
+ # sudo ifconfig provisioning 172.22.0.1 netmask 255.255.255.0 up
+ # Use ip command. ifconfig commands are deprecated now.
+ sudo ip addr add dev provisioning 172.22.0.1/24
+ sudo ip link set provisioning up
+
+ # Need to pass the provision interface for bare metal
+ if [ "$PRO_IF" ]; then
+ sudo brctl addif provisioning "$PRO_IF"
+ fi
+ fi
+
+ if [ "$MANAGE_INT_BRIDGE" == "y" ]; then
+ # Create the baremetal bridge
+ if ! [[ $(ip a show baremetal) ]]; then
+ sudo brctl addbr baremetal
+ # sudo ifconfig baremetal 192.168.111.1 netmask 255.255.255.0 up
+ # Use ip command. ifconfig commands are deprecated now.
+ sudo ip addr add dev baremetal 192.168.111.1/24
+ sudo ip link set baremetal up
+ fi
+
+  # Add the internal interface to it if requested; this may also be the interface providing
+  # external access, so we need to make sure we maintain dhcp config if it's available
+  if [ "$INT_IF" ]; then
+    # brctl addif takes the bridge name first (cf. "brctl addif provisioning
+    # $PRO_IF" above); without "baremetal" the interface was never attached.
+    sudo brctl addif baremetal "$INT_IF"
+  fi
+ fi
+
+ # restart the libvirt network so it applies an ip to the bridge
+ if [ "$MANAGE_BR_BRIDGE" == "y" ] ; then
+ sudo virsh net-destroy baremetal
+ sudo virsh net-start baremetal
+ if [ "$INT_IF" ]; then #Need to bring UP the NIC after destroying the libvirt network
+ sudo ifup "$INT_IF"
+ fi
+ fi
--- /dev/null
+#!/usr/bin/env bash
+set -ex
+
+# shellcheck disable=SC1091
+source lib/logging.sh
+# shellcheck disable=SC1091
+source lib/common.sh
+
+# sudo apt install -y libselinux-utils
+# if selinuxenabled ; then
+# sudo setenforce permissive
+# sudo sed -i "s/=enforcing/=permissive/g" /etc/selinux/config
+# fi
+
+# Update to latest packages first
+sudo apt -y update
+
+# Install EPEL required by some packages
+# if [ ! -f /etc/yum.repos.d/epel.repo ] ; then
+# if grep -q "Red Hat Enterprise Linux" /etc/redhat-release ; then
+# sudo yum -y install http://mirror.centos.org/centos/7/extras/x86_64/Packages/epel-release-7-11.noarch.rpm
+# else
+# sudo yum -y install epel-release --enablerepo=extras
+# fi
+# fi
+
+# Work around a conflict with a newer zeromq from epel
+# if ! grep -q zeromq /etc/yum.repos.d/epel.repo; then
+# sudo sed -i '/enabled=1/a exclude=zeromq*' /etc/yum.repos.d/epel.repo
+# fi
+
+# Install required packages
+
+sudo apt -y install \
+ crudini \
+ curl \
+ dnsmasq \
+ figlet \
+ golang \
+ zlib1g-dev \
+ libssl1.0-dev \
+ nmap \
+ patch \
+ psmisc \
+ python-pip \
+ wget
+
+
+
+# Install pyenv
+
+if [[ $(cat ~/.bashrc) != *PYENV_ROOT* ]]; then
+  if ! [ -d "$HOME/.pyenv" ] ; then
+    # GitHub no longer serves the unauthenticated git:// protocol; use https.
+    git clone https://github.com/yyuu/pyenv.git ~/.pyenv
+  fi
+ # shellcheck disable=SC2016
+ # shellcheck disable=SC2129
+ echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc
+ # shellcheck disable=SC2016
+ echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc
+ # shellcheck disable=SC2016
+ echo -e 'if command -v pyenv 1>/dev/null 2>&1; then\n eval "$(pyenv init -)"\nfi' >> ~/.bashrc
+fi
+
+if [[ $PATH != *pyenv* ]]; then
+ export PYENV_ROOT="$HOME/.pyenv"
+ export PATH="$PYENV_ROOT/bin:$PATH"
+ if command -v pyenv 1>/dev/null 2>&1; then
+ eval "$(pyenv init -)"
+ fi
+fi
+
+pyenv install -s 2.7.5
+pyenv versions
+pyenv global 2.7.5
+# There are some packages which are newer in the tripleo repos
+
+# Setup yarn and nodejs repositories
+#sudo curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo
+curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
+#curl -sL https://rpm.nodesource.com/setup_10.x | sudo bash -
+echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
+
+# Add this repository to install podman
+sudo add-apt-repository -y ppa:projectatomic/ppa
+# Add this repository to install Golang 1.12
+sudo add-apt-repository -y ppa:longsleep/golang-backports
+
+# Update some packages from new repos
+sudo apt -y update
+
+# make sure additional requirements are installed
+
+##No bind-utils. It is for host, nslookop,..., no need in ubuntu
+sudo apt -y install \
+ jq \
+ libguestfs-tools \
+ nodejs \
+ qemu-kvm \
+ libvirt-bin libvirt-clients libvirt-dev \
+ python-ironicclient \
+ python-ironic-inspector-client \
+ golang-go \
+ python-lxml \
+ unzip \
+ yarn \
+ genisoimage
+
+
+if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then
+ sudo apt -y install podman
+else
+ sudo apt -y install \
+ apt-transport-https \
+ ca-certificates \
+ gnupg-agent \
+ software-properties-common
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ sudo add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+ sudo apt update
+ sudo apt install -y docker-ce docker-ce-cli containerd.io
+ sudo systemctl start docker
+fi
+
+# Install python packages not included as rpms
+sudo pip install \
+ ansible==2.8.2 \
+ lolcat \
+ yq \
+ virtualbmc \
+ python-ironicclient \
+ python-ironic-inspector-client \
+ lxml \
+ netaddr \
+ requests \
+ setuptools \
+  libvirt-python
--- /dev/null
+[virthost]
+localhost
--- /dev/null
+#!/usr/bin/python
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# generate_baremetal_macs method ripped from
+# openstack/tripleo-incubator/scripts/configure-vm
+
+import math
+import random
+
+DOCUMENTATION = '''
+---
+module: generate_macs
+version_added: "2.0"
+short_description: Generate a list of Ethernet MAC addresses
+description:
+ - Generate a list of Ethernet MAC addresses suitable for baremetal testing.
+'''
+
+MAX_NUM_MACS = math.trunc(0xff / 2)
+
+
+def generate_baremetal_macs(nodes, networks):
+ """Generate an Ethernet MAC address suitable for baremetal testing."""
+ # NOTE(dprince): We generate our own bare metal MAC address's here
+ # instead of relying on libvirt so that we can ensure the
+ # locally administered bit is set low. (The libvirt default is
+ # to set the 2nd MSB high.) This effectively allows our
+ # fake baremetal VMs to more accurately behave like real hardware
+ # and fixes issues with bridge/DHCP configurations which rely
+ # on the fact that bridges assume the MAC address of the lowest
+ # attached NIC.
+ # MACs generated for a given machine will also be in sequential
+ # order, which matches how most BM machines are laid out as well.
+ # Additionally we increment each MAC by two places.
+ macs = []
+ count = len(nodes) * len(networks)
+
+ if count > MAX_NUM_MACS:
+ raise ValueError("The MAX num of MACS supported is %i "
+ "(you specified %i)." % (MAX_NUM_MACS, count))
+
+ base_nums = [0x00,
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
+
+ start = random.randint(0x00, 0xff)
+ if (start + (count * 2)) > 0xff:
+ # leave room to generate macs in sequence
+ start = 0xff - count * 2
+ for num in range(0, count * 2, 2):
+ mac = start + num
+ macs.append(base_mac + ":" + ("%02x" % mac))
+
+ result = {}
+ for node in nodes:
+ result[node['name']] = {}
+ for network in networks:
+ result[node['name']][network['name']] = macs.pop(0)
+
+ return result
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ nodes=dict(required=True, type='list'),
+ networks=dict(required=True, type='list')
+ )
+ )
+ result = generate_baremetal_macs(module.params["nodes"],
+ module.params["networks"])
+ module.exit_json(**result)
+
+# see http://docs.ansible.com/developing_modules.html#common-module-boilerplate
+from ansible.module_utils.basic import AnsibleModule # noqa
+
+
+if __name__ == '__main__':
+ main()
--- /dev/null
+# Defaults for the common role: VM sizing, libvirt connection details
+# and the baremetal network used by the fake-baremetal VMs.
+non_root_user: "{{ lookup('env', 'USER') }}"
+
+# base domain to use
+cluster_domain: "{{ lookup('env', 'CLUSTER_DOMAIN') | default('ostest.test.metalkube.org', true) }}"
+
+# allow the nic model to be overridden
+libvirt_nic_model: virtio
+
+# These defaults are used if there are no flavor-specific
+# overrides configured.
+default_disk: 50
+default_memory: 8192
+default_vcpu: 8
+num_masters: 1
+num_workers: 1
+extradisks: false
+# First IPMI port handed out by virtualbmc; each VM gets base + index.
+virtualbmc_base_port: 6230
+# Per-flavor sizing; each entry falls back to the default_* values above
+# unless a master_*/worker_* override is defined.
+flavors:
+  master:
+    memory: '{{master_memory|default(default_memory)}}'
+    disk: '{{master_disk|default(default_disk)}}'
+    vcpu: '{{master_vcpu|default(default_vcpu)}}'
+    extradisks: '{{extradisks|bool}}'
+
+  worker:
+    memory: '{{worker_memory|default(default_memory)}}'
+    disk: '{{worker_disk|default(default_disk)}}'
+    vcpu: '{{worker_vcpu|default(default_vcpu)}}'
+    extradisks: '{{extradisks|bool}}'
+
+# An optional prefix for node names
+ironic_prefix: ""
+
+# CIDR for the (routable) baremetal network; DHCP/static addresses for
+# the VMs are carved out of this range in tasks/main.yml.
+baremetal_network_cidr: "{{ lookup('env', 'EXTERNAL_SUBNET') | default('192.168.111.0/24', true) }}"
+
+# Set this to `false` if you don't want your vms
+# to have a VNC console available.
+enable_vnc_console: true
+
+# Path for volume storage
+libvirt_volume_path: "{{ working_dir }}/pool"
+
+# These ensure we're using privileged virt, so VMs persist over reboot
+libvirt_uri: qemu:///system
+ssh_user: root
--- /dev/null
+---
+
+# Only synthesize vm_nodes/networks when the caller has not supplied
+# them explicitly (e.g. via extra-vars).
+- set_fact:
+    generate_vm_nodes: "{{vm_nodes is not defined}}"
+
+- name: Define vm_nodes if not already defined
+  when: generate_vm_nodes
+  block:
+  - name: Generate vm_nodes for "{{num_masters}}" masters
+    set_fact:
+      vm_nodes: "{{vm_nodes|default([]) + [
+        {'name': ironic_prefix + 'master_%s'|format(item),
+        'flavor': 'master',
+        'virtualbmc_port': virtualbmc_base_port+item}]}}"
+    loop: "{{ range(0, num_masters|int)|list }}"
+
+  # Worker BMC ports start after the master ports, hence the
+  # num_masters offset.
+  - name: Generate vm_nodes for "{{num_workers}}" workers
+    set_fact:
+      vm_nodes: "{{vm_nodes|default([]) + [
+        {'name': ironic_prefix + 'worker_%s'|format(item),
+        'flavor': 'worker',
+        'virtualbmc_port': virtualbmc_base_port+num_masters|int+item} ]}}"
+    loop: "{{ range(0, num_workers|int)|list }}"
+
+# Describe our virtual networks. These networks will be attached to
+# the vm nodes in the order in which they are defined with the following caveats:
+# * The first bridge network defined will be used for pxe booting
+- set_fact:
+    generate_networks: "{{networks is not defined}}"
+- name: Define networks when not already defined
+  when: generate_networks
+  block:
+  # DHCP reservations start at .20 of the baremetal CIDR; workers follow
+  # the masters, mirroring the virtualbmc port layout above.
+  - name: Generate dhcp entries on baremetal network for "{{num_masters}}" masters
+    set_fact:
+      dhcp_hosts: "{{dhcp_hosts|default([]) + [
+        {'name': 'master-%s'|format(item),
+        'ip': baremetal_network_cidr|nthhost(20+item)|string}]}}"
+    loop: "{{ range(0, num_masters|int)|list }}"
+
+  - name: Generate dhcp entries on baremetal network for "{{num_workers}}" workers
+    set_fact:
+      dhcp_hosts: "{{dhcp_hosts|default([]) + [
+        {'name': 'worker-%s'|format(item),
+        'ip': baremetal_network_cidr|nthhost(20+num_masters|int+item)|string} ]}}"
+    loop: "{{ range(0, num_workers|int)|list }}"
+
+  # The baremetal network is NAT'd (and serves DHCP/DNS) only when this
+  # host manages it; otherwise it is a plain bridge.
+  - name: Set fact for networks
+    set_fact:
+      networks:
+        - name: provisioning
+          bridge: provisioning
+          forward_mode: bridge
+        - name: baremetal
+          bridge: baremetal
+          forward_mode: "{% if manage_baremetal == 'y' %}nat{% else %}bridge{% endif %}"
+          address: "{{ baremetal_network_cidr|nthhost(1) }}"
+          netmask: "{{ baremetal_network_cidr|ipaddr('netmask') }}"
+          dhcp_range:
+            - "{{ baremetal_network_cidr|nthhost(20) }}"
+            - "{{ baremetal_network_cidr|nthhost(60) }}"
+          dhcp_hosts: "{{dhcp_hosts}}"
+          nat_port_range:
+            - 1024
+            - 65535
+          domain: "{{ cluster_domain }}"
+          dns:
+            hosts: "{{dns_extrahosts | default([])}}"
+            forwarders:
+              - domain: "apps.{{ cluster_domain }}"
+                addr: "127.0.0.1"
--- /dev/null
+# Defaults for the libvirt role: action selection, storage/disk layout
+# and the per-distribution package lists consumed by
+# tasks/install_setup_tasks.yml.
+# When libvirt_action==teardown we destroy the existing configuration
+libvirt_action: setup
+
+# For some baremetal testing we set this to "baremetal" so that only the
+# libvirt networking is configured, not the nodes
+vm_platform: libvirt
+
+# Which libvirt session should we use? Using `qemu://session` does
+# not require privileged access (but does require the setup performed by the
+# `environment/setup` role).
+libvirt_volume_pool: oooq_pool
+libvirt_domain_type: kvm
+libvirt_diskdev: sda
+libvirt_diskbus: scsi
+libvirt_arch: x86_64
+libvirt_cpu_mode: host-model
+
+# how many disks should be created when using extradisks
+extradisks_list:
+  - vdb
+
+# size of the disks to create when using extradisks
+extradisks_size: 8G
+
+# The packages required to set up our desired libvirt environment.
+# (Tested on Centos 7)
+libvirt_packages:
+  - qemu-kvm
+  - libvirt
+  - libvirt-python
+  - libguestfs-tools
+  - python-lxml
+  - polkit-pkla-compat
+  - python-netaddr
+  - python2-virtualbmc
+
+# We expect virtualbmc to already be installed on rhel8 as a pre-req to running this,
+# as there's no rhel package available yet.
+libvirt_packages_rhel8:
+  - qemu-kvm
+  - libvirt
+  - python3-libvirt
+  - libguestfs-tools
+  - python3-lxml
+  - polkit-pkla-compat
+  - python3-netaddr
+
+ubuntu_libvirt_packages:
+  - qemu-kvm
+  - libvirt-bin
+  - libvirt-clients
+  - libvirt-dev
+  - python-libvirt
+  - libguestfs-tools
+  - python-lxml
+  - gir1.2-polkit-1.0
+  - libpolkit-agent-1-0
+  - libpolkit-backend-1-0
+  - libpolkit-gobject-1-0
+
+
+# The name of the libvirt service.
+libvirtd_service: libvirtd
--- /dev/null
+#!/bin/sh
+
+# This script will attempt to get the ip address of the a given libvirt guest.
+
+set -eu
+
+PATH=$PATH:/usr/sbin:/sbin
+
+VMNAME=$1
+
+# Get the MAC address of the first interface by looking for looking for the
+# `<mac address...` line. Yes, we're parsing XML with awk. It's probably
+# safe (because the XML is coming from libvirt, so we can be reasonably
+# confident that the formatting will remain the same).
+mac=$(virsh dumpxml $VMNAME | awk -F "'" '/mac address/ { print $2; exit }')
+
+# Look up the MAC address in the ARP table.
+ip=$(ip neigh | grep $mac | awk '{print $1;}')
+
+if [ -z "$ip" ]; then
+ echo "vm ip is not available" >&2
+ exit 1
+fi
+
+echo $ip
--- /dev/null
+# The libvirt role reuses the common role's defaults/facts
+# (vm_nodes, networks, flavors, working_dir, ...).
+dependencies:
+  - role: common
--- /dev/null
+# Install the packages required for our desired libvirt environment.
+# We store the list of packages in `libvirt_packages` so that in
+# theory we can support multiple distributions simply by passing in a
+# different list of packages.
+# NOTE(review): the RHEL branches key off ansible_lsb.major_release,
+# which is only populated when the lsb_release tool is installed on the
+# target — confirm this prerequisite is met.
+- name: Install packages for libvirt
+  package:
+    name: "{{ libvirt_packages }}"
+    state: present
+  become: true
+  when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int == 7
+
+- name: Install packages for libvirt
+  package:
+    name: "{{ libvirt_packages_rhel8 }}"
+    state: present
+  become: true
+  when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int == 8
+
+- name: Install packages for libvirt on Ubuntu
+  when:
+    - ansible_facts['distribution'] == "Ubuntu"
+  package:
+    name: "{{ ubuntu_libvirt_packages }}"
+    state: present
+  become: true
+
+# Make sure the daemon is up now and across reboots before the network
+# and VM setup tasks talk to it.
+- name: Start libvirtd
+  service:
+    name: "{{ libvirtd_service }}"
+    state: started
+    enabled: true
+  become: true
--- /dev/null
+# Dispatch on libvirt_action (defaults/main.yml, default "setup").
+- name: libvirt role setup tasks
+  block:
+    - include_tasks: install_setup_tasks.yml
+    - include_tasks: network_setup_tasks.yml
+    # Nodes are only created on the "libvirt" platform; this inner
+    # `when` applies to vm_setup_tasks.yml alone, so networking is
+    # still configured for other platforms (e.g. "baremetal").
+    - include_tasks: vm_setup_tasks.yml
+      when: vm_platform == "libvirt"
+  when: libvirt_action == "setup"
+
+- name: libvirt role teardown tasks
+  block:
+    - include_tasks: network_teardown_tasks.yml
+    - include_tasks: vm_teardown_tasks.yml
+  when: libvirt_action == "teardown"
--- /dev/null
+# If virtualport_type is defined for any networks, include OVS dependencies
+- when: networks|selectattr('virtualport_type', 'defined')|map(attribute='name')|list|length > 0
+  block:
+
+  # Install OVS dependencies
+  - name: Install OVS dependencies
+    include_role:
+      name: 'parts/ovs'
+
+  # Create any OVS Bridges that have been defined
+  - name: Create OVS Bridges
+    openvswitch_bridge:
+      bridge: "{{ item.bridge }}"
+      state: present
+    when: item.virtualport_type is defined and item.virtualport_type == "openvswitch"
+    with_items: "{{ networks }}"
+    become: true
+
+# TODO(apuimedo) drop this back to vm tasks once we have proper DNS
+# node_mac_map is consumed by the network/domain templates and the
+# ironic_nodes.json template; it maps node name -> network name -> MAC.
+- name: get a list of MACs to use
+  generate_macs:
+    nodes: "{{ vm_nodes }}"
+    networks: "{{ networks }}"
+  register: node_mac_map
+  when: vm_nodes
+
+
+# Create the global, root-managed libvirt networks to which we will
+# attach the undercoud and vm virtual machines.
+- name: Create libvirt networks
+  virt_net:
+    command: define
+    state: present
+    name: "{{ item.name }}"
+    xml: '{{ lookup("template", "network.xml.j2") }}'
+  with_items: "{{ networks }}"
+  become: true
+
+- name: Start libvirt networks
+  virt_net:
+    command: start
+    name: "{{ item.name }}"
+    state: active
+  with_items: "{{ networks }}"
+  become: true
+
+- name: Mark libvirt networks as autostarted
+  virt_net:
+    name: "{{ item.name }}"
+    autostart: "yes"
+  with_items: "{{ networks }}"
+  become: true
+  register: net_autostart
+  ignore_errors: true
+
+# https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676
+# There is a bug w/ virt_net and RHEL where the network xml
+# file is not written to /etc/libvirt/qemu/networks/ This causes
+# network to be considered transient.
+# NOTE(review): this workaround triggers whenever the autostart task
+# reported "not changed" (including the already-autostarted case), not
+# only on failure — confirm that is intended.
+- when: not net_autostart.changed
+  block:
+
+    - name: Check if "virsh net-autostart" was successful
+      debug: msg="Some libvirt networks were not set to autostart. Please see
+        https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676"
+
+    # get the network xml from the running network
+    - name: Get libvirt networks xml
+      virt_net:
+        command: get_xml
+        name: "{{ item.name }}"
+      with_items: "{{ networks }}"
+      register: net_xml
+      become: true
+
+    # copy the xml to a file
+    - name: copy network-xml to file
+      copy: content={{ item.get_xml }} dest=/tmp/network-{{ item.item.name }}.xml
+      with_items: "{{ net_xml.results }}"
+      become: true
+
+    # redefine the network w/ virsh, this will write the xml file to
+    # /etc/libvirt/qemu/networks/ and it will no longer be transient
+    - name: redefine the libvirt networks so the config is written to /etc/libvirt
+      command: virsh net-define /tmp/network-{{ item.name }}.xml
+      with_items: "{{ networks }}"
+      become: true
+
+    # Now we're ready to mark the network autostart
+    - name: Mark libvirt networks as autostarted
+      virt_net:
+        name: "{{ item.name }}"
+        autostart: "yes"
+      with_items: "{{ networks }}"
+      become: true
+
+# Whitelist the bridges associated with these networks for
+# access using qemu [helper networking][helper]. Later on we
+# create virtual machines use an unprivileged `qemu://session`
+# connection, and we connect to the networks using the bridge names.
+#
+# [helper]: http://wiki.qemu.org/Features-Done/HelperNetworking
+- name: Whitelist bridges for unprivileged access on CentOS
+  lineinfile:
+    dest: '/etc/qemu-kvm/bridge.conf' # Needs to be /etc/qemu/bridge.conf if supporting Fedora
+    line: "allow {{ item.bridge }}"
+  with_items: "{{ networks }}"
+  when:
+    - ansible_os_family == "RedHat"
+  become: true
+
+- name: Whitelist bridges for unprivileged access on Ubuntu or Fedora
+  lineinfile:
+    dest: '/etc/qemu/bridge.conf'
+    line: "allow {{ item.bridge }}"
+    create: yes
+  with_items: "{{ networks }}"
+  when:
+    - ansible_facts['distribution'] == "Ubuntu"
+  become: true
+
+# We're going to want to store things in `working_dir` so ensure it
+# exists first. `working_dir` is a directory on the target host.
+- name: Ensure remote working dir exists
+  file:
+    path: "{{ working_dir }}"
+    state: directory
+  become: true
--- /dev/null
+# Tear down the libvirt networks created by network_setup_tasks.yml:
+# stop them, undefine them, and (on Ubuntu) remove the leftover Linux
+# bridges that libvirt does not clean up itself.
+- name: Stop libvirt networks
+  virt_net:
+    command: destroy
+    name: "{{ item.name }}"
+    state: inactive
+  with_items: "{{ networks }}"
+  become: true
+
+- name: Delete libvirt networks
+  virt_net:
+    command: undefine
+    state: absent
+    name: "{{ item.name }}"
+  with_items: "{{ networks }}"
+  become: true
+
+# `|| true` (not `| true`) keeps the task green when a bridge is
+# already gone; `sudo` is unnecessary because the task runs with
+# `become: yes`.
+- name: Delete bridges on Ubuntu
+  shell: |
+    ip link set baremetal down
+    ip link set provisioning down
+    brctl delbr baremetal || true
+    brctl delbr provisioning || true
+  when:
+    - ansible_distribution == 'Ubuntu'
+  become: yes
+
--- /dev/null
+# Create a libvirt volume pool. This is where we'll be creating
+# images for the VMs
+# Note: the virt_pool module is not working properly on rhel-7.2
+# https://bugs.launchpad.net/tripleo-quickstart/+bug/1597905
+- name: ensure libvirt volume path exists
+  become: true
+  file:
+    path: "{{ libvirt_volume_path }}"
+    state: directory
+    mode: 0755
+
+# Non-zero rc means the pool does not exist yet; the next two tasks
+# then define it from the template.
+- name: Check volume pool
+  command: >
+    virsh pool-uuid "{{ libvirt_volume_pool }}"
+  register: pool_check
+  ignore_errors: true
+  changed_when: false
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+
+- name: create the volume pool xml file
+  template:
+    src: volume_pool.xml.j2
+    dest: "{{ working_dir }}/volume_pool.xml"
+  when: pool_check is failed
+
+- name: Define volume pool
+  command: "virsh pool-define {{ working_dir }}/volume_pool.xml"
+  when: pool_check is failed
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+
+- name: Start volume pool
+  virt_pool:
+    command: start
+    state: active
+    name: "{{ libvirt_volume_pool }}"
+    uri: "{{ libvirt_uri }}"
+
+# In some cases the pool_check can pass and the pool xml config is absent
+# In this case it is required to dump the xml and redefine the pool.
+- name: ensure tripleo-quickstart volume pool is defined
+  shell: >
+    virsh pool-dumpxml {{ libvirt_volume_pool }} |
+    virsh pool-define /dev/stdin
+  changed_when: true
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+
+- name: Mark volume pool for autostart
+  virt_pool:
+    name: "{{ libvirt_volume_pool }}"
+    autostart: "yes"
+    uri: "{{ libvirt_uri }}"
+
+- when: vm_nodes
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+  block:
+
+    # Create libvirt volumes for the vm hosts.
+    - name: Check if vm volumes exist
+      command: >
+        virsh vol-info --pool '{{ libvirt_volume_pool }}' '{{ item.name }}.qcow2'
+      register: vm_vol_check
+      ignore_errors: true
+      with_items: "{{ vm_nodes }}"
+
+    # Only create volumes the previous check reported missing; disk
+    # size comes from the node's flavor.
+    - name: Create vm vm storage
+      command: >
+        virsh vol-create-as '{{ libvirt_volume_pool }}'
+        '{{ item.item.name }}'.qcow2 '{{ flavors[item.item.flavor].disk }}'G
+        --format qcow2
+      when:
+        - item is failed
+      with_items: "{{ vm_vol_check.results }}"
+
+    # Define (but do not start) the vm nodes. These will be
+    # booted later by ironic during the provisioning process.
+    - name: Define vm vms
+      virt:
+        name: "{{ item.name }}"
+        command: define
+        xml: "{{ lookup('template', 'baremetalvm.xml.j2') }}"
+        uri: "{{ libvirt_uri }}"
+      with_items: "{{ vm_nodes }}"
+
+    # Create additional blockdevices for each objectstorage flavor node
+    # These are sparse files, not using space if unused
+    - name: Create additional blockdevice for objectstorage nodes
+      command: >
+        dd if=/dev/zero of={{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img bs=1 count=0 seek={{ extradisks_size }}
+      when: flavors[item[0].flavor].extradisks|default(false)
+      with_nested:
+        - "{{ vm_nodes }}"
+        - "{{ extradisks_list }}"
+
+    - name: Check if additional blockdevices are attached
+      command: >
+        virsh domblkinfo {{ item[0].name }} {{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img
+      when: flavors[item[0].flavor].extradisks|default(false)
+      changed_when: false
+      ignore_errors: true
+      register: vm_extradisks_check
+      with_nested:
+        - "{{ vm_nodes }}"
+        - "{{ extradisks_list }}"
+
+    - name: Attach additional blockdevices to vm objectstorage VMs
+      command: >
+        virsh attach-disk --config {{ item.item[0].name }} {{ libvirt_volume_path }}/{{ item.item[0].name }}_{{ item.item[1] }}.img {{ item.item[1] }}
+      when: item is failed
+      with_items: "{{ vm_extradisks_check.results }}"
+
+# Generate the ironic node inventory files. Note that this
+# task *must* occur after the above vm tasks, because if
+# `vm_nodes` is defined the template depends on the
+# `node_mac_map` variable.
+- name: Write ironic node json files
+  template:
+    src: ../templates/ironic_nodes.json.j2
+    dest: "{{ working_dir }}/ironic_nodes.json"
--- /dev/null
+# NB: We use `virsh` here instead of the `virt` module because
+# these tasks may be called before the dependencies of the `virt`
+# module are satisfied.
+
+- name: Check if libvirt is available
+  command: >
+    virsh uri
+  ignore_errors: true
+  changed_when: false
+  register: libvirt_check
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+
+# If libvirt isn't available we can skip everything else.
+- when: libvirt_check is success
+  environment:
+    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
+  block:
+
+    - when: vm_nodes
+      block:
+
+        # Check if the vm nodes exist.
+        - name: Check vm vms
+          command: >
+            virsh domid "{{ item.name }}"
+          with_items: "{{ vm_nodes }}"
+          ignore_errors: true
+          register: vm_check
+
+        # Destroy and undefine the vm nodes.
+        - name: Destroy vm vms
+          command:
+            virsh destroy "{{ item.item.name }}"
+          when: item is success
+          with_items: "{{ vm_check.results }}"
+          ignore_errors: true
+
+        - name: Undefine vm vms
+          command:
+            virsh undefine "{{ item.item.name }}"
+          when: item is success
+          with_items: "{{ vm_check.results }}"
+
+        # The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to
+        # [1293804].
+        #
+        # [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804
+        # NOTE(review): `2>&1 > /dev/null` routes stderr to the console
+        # and only stdout to /dev/null — confirm that ordering is the
+        # intent (vs `> /dev/null 2>&1`).
+        - name: Delete baremetal vm storage
+          shell: |
+            virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \
+              '{{ item.name }}'.qcow2 2>&1 > /dev/null
+            virsh vol-delete --pool '{{ libvirt_volume_pool }}' \
+              '{{ item.name }}'.qcow2
+          with_items: "{{ vm_nodes }}"
+          ignore_errors: true
+
+    - name: Check volume pool
+      command: >
+        virsh pool-uuid "{{ libvirt_volume_pool }}"
+      register: pool_check
+      ignore_errors: true
+
+    # See https://www.redhat.com/archives/libvirt-users/2016-March/msg00123.html
+    # TL;DR: ensure that the pool really exists if the previous
+    # task says it does.
+    - name: Work around libvirt bug
+      shell: |
+        virsh pool-dumpxml "{{ libvirt_volume_pool }}" |
+        virsh pool-define /dev/stdin
+      when: pool_check is success
+
+    - name: Destroy volume pool
+      command: >
+        virsh pool-destroy "{{ libvirt_volume_pool }}"
+      when: pool_check is success
+      ignore_errors: true
+
+    - name: Undefine volume pool
+      command: >
+        virsh pool-undefine "{{ libvirt_volume_pool }}"
+      when: pool_check is success
+
+    - name: Get UID of pool user
+      command: id -u "{{ ansible_user_id }}"
+      register: pool_uid
+      changed_when: false
+      when: pool_check is success
+
+    # Remove the per-user runtime pool definition so a later setup run
+    # starts from a clean slate.
+    - name: Destroy pool definition file
+      file:
+        path: "/run/user/{{ pool_uid.stdout }}/libvirt/storage/run/{{ libvirt_volume_pool }}.xml"
+        state: absent
+      when: pool_check is success
--- /dev/null
+{# libvirt domain definition for one fake-baremetal VM.  `item` is a
+   vm_nodes entry (name/flavor); sizing comes from the flavors map and
+   MACs from node_mac_map.  Boots from the network so ironic can
+   provision it. #}
+<domain type='{{ libvirt_domain_type }}'>
+  <name>{{ item.name }}</name>
+  <memory unit='MiB'>{{ flavors[item.flavor].memory }}</memory>
+  <vcpu>{{ flavors[item.flavor].vcpu }}</vcpu>
+
+  {{baremetal_vm_xml|default('')}}
+
+  <os>
+    <type arch='{{ libvirt_arch }}'>hvm</type>
+    <boot dev='network'/>
+    <bootmenu enable='no'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <cpu mode='host-passthrough'/>
+  <clock offset='utc'/>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <disk type='volume' device='disk'>
+      <driver name='qemu' type='qcow2' cache='unsafe'/>
+      <source pool='{{ libvirt_volume_pool }}' volume='{{ item.name }}.qcow2'/>
+      <target dev='{{ libvirt_diskdev }}' bus='{{ libvirt_diskbus }}'/>
+    </disk>
+{% if libvirt_diskbus == 'scsi' %}
+    <controller type='scsi' model='virtio-scsi' />
+{% endif %}
+{# One bridge interface per configured network, in definition order. #}
+{% for network in networks %}
+    <interface type='bridge'>
+      <mac address='{{ node_mac_map.get(item.name).get(network.name) }}'/>
+      <source bridge='{{ network.bridge }}'/>
+      <model type='{{ libvirt_nic_model }}'/>
+{% if network.virtualport_type is defined %}
+      <virtualport type='{{ network.virtualport_type }}'/>
+{% endif %}
+    </interface>
+{% endfor %}
+    <serial type='pty'/>
+    <console type='pty'/>
+
+{% if enable_vnc_console|bool %}
+    <input type='mouse' bus='ps2'/>
+    <graphics type='vnc' port='-1' autoport='yes'/>
+    <video>
+      <model type='cirrus' vram='9216' heads='1'/>
+    </video>
+{% endif %}
+
+  {{baremetal_vm_device_xml|default('')}}
+
+  </devices>
+</domain>
--- /dev/null
+{# Render the ironic node inventory as JSON.  The loop below picks the
+   first non-NAT network as the PXE network and the first network with
+   an address as the host IP used for IPMI access. #}
+{% set lvars = { 'host_ip' : '192.168.122.1', 'pxe_network' : False} %}
+{% for network in networks %}
+{% if (not (network.forward_mode is defined and network.forward_mode == 'nat') and lvars['pxe_network'] == False) %}
+{% if lvars.update({'pxe_network' : network.name}) %}{% endif %}
+{% endif %}
+{% if network.address is defined and lvars['host_ip'] == '192.168.122.1' %}
+{% if lvars.update({'host_ip' : network.address}) %}{% endif %}
+{% endif %}
+{% endfor %}
+{
+  "nodes": [
+    {% for node in vm_nodes %}
+    {
+      "name": "{{ node.name|replace('_', '-') }}",
+      "driver": "ipmi",
+      "resource_class": "baremetal",
+      "driver_info": {
+        "ipmi_username": "admin",
+        "ipmi_password": "password",
+        "ipmi_address": "{{ lvars['host_ip'] }}",
+        "ipmi_port": "{{ node.virtualbmc_port }}",
+        "deploy_kernel": "http://172.22.0.1/images/ironic-python-agent.kernel",
+        "deploy_ramdisk": "http://172.22.0.1/images/ironic-python-agent.initramfs"
+      },
+      "ports": [{
+        "address": "{{ node_mac_map.get(node.name).get(lvars['pxe_network']) }}",
+        "pxe_enabled": true
+      }],
+      "properties": {
+        "local_gb": "{{ flavors[node.flavor].disk }}",
+        "cpu_arch": "{{ libvirt_arch }}"
+      }
+    }
+    {% if not loop.last %}
+    ,
+    {% endif %}
+    {% endfor %}
+  ]
+}
--- /dev/null
+{# libvirt network definition for one entry of `networks` (`item`).
+   Renders NAT forwarding, static DHCP host entries (MACs taken from
+   node_mac_map) and optional DNS records. #}
+{% set nat_port_range = item.nat_port_range|default([1024, 65535]) %}
+{% set netmask = item.netmask|default('255.255.255.0') %}
+{% if item.dhcp_hosts is defined %}
+{# dhcp host names use '-', vm_nodes names use '_'; normalize here so
+   they can be looked up in node_mac_map. #}
+{% set dhcp_hosts_names = item.dhcp_hosts | map(attribute='name') | map('replace', '-', '_') | list %}
+{% endif %}
+<network>
+  <name>{{ item.name }}</name>
+  <bridge name='{{ item.bridge }}'/>
+{% if item.forward_mode is defined %}
+  <forward mode='{{ item.forward_mode }}'>
+{% if item.forward_mode == 'nat' %}
+    <nat>
+      <port start='{{ nat_port_range[0] }}' end='{{ nat_port_range[1] }}' />
+    </nat>
+{% endif %}
+  </forward>
+{% endif %}
+{% if item.virtualport_type is defined %}
+  <virtualport type='{{ item.virtualport_type }}'/>
+{% endif %}
+{% if item.address is defined and item.forward_mode != 'bridge' %}
+  <ip address='{{ item.address }}' netmask='{{ netmask }}'>
+{% if item.dhcp_range is defined %}
+    <dhcp>
+      <range start='{{ item.dhcp_range[0] }}' end='{{ item.dhcp_range[1] }}'/>
+      {% if item.dhcp_hosts is defined %}
+      {% for host in item.dhcp_hosts %}
+      <host mac='{{ node_mac_map.get(ironic_prefix + dhcp_hosts_names[loop.index0]).get(item.name) }}' name='{{ host.name }}' ip='{{ host.ip }}'/>
+      {% endfor %}
+      {% endif %}
+    </dhcp>
+{% endif %}
+  </ip>
+{% if item.domain is defined %}
+  <domain name='{{ item.domain }}' localOnly='yes'/>
+{% endif %}
+{% if item.dns is defined %}
+  <dns>
+    {% for host in item.dns.hosts %}
+    <host ip='{{ host.ip }}'>
+      {% for name in host.hostnames %}
+      <hostname>{{ name }}</hostname>
+      {% endfor %}
+    </host>
+    {% endfor %}
+    {% if item.dns.srvs is defined %}
+    {% for srv in item.dns.srvs %}
+    <srv service='{{ srv.name }}' protocol='{{ srv.protocol }}' domain='{{ srv.domain }}' port='{{ srv.port }}' target='{{ srv.target }}' />
+    {% endfor %}
+    {% endif %}
+    {% if item.dns.forwarders is defined %}
+    {% for forwarder in item.dns.forwarders %}
+    <forwarder domain='{{ forwarder.domain }}' addr='{{ forwarder.addr }}' />
+    {% endfor %}
+    {% endif %}
+  </dns>
+{% endif %}
+{% endif %}
+</network>
--- /dev/null
+{# Directory-backed libvirt storage pool holding the VM qcow2 volumes;
+   owner/group -1 keeps the invoking user's ownership. #}
+<pool type='dir'>
+  <name>{{ libvirt_volume_pool }}</name>
+  <target>
+    <path>{{ libvirt_volume_path }}</path>
+    <permissions>
+      <mode>0755</mode>
+      <owner>-1</owner>
+      <group>-1</group>
+    </permissions>
+  </target>
+</pool>
--- /dev/null
+# Selects which task file tasks/main.yml includes for this role.
+# Can be set to "teardown" to destroy a previous configuration
+virtbmc_action: setup
--- /dev/null
+#!/bin/bash -x
+
+name="$1"
+
+status=$(vbmc show -f value $name | grep status | cut -f2 -d' ')
+
+export PATH=$PATH:/usr/local/bin
+
+if [[ $status != "running" ]]; then
+ vbmc start $name
+fi
--- /dev/null
+---
+# The virtbmc role reuses defaults from the common role
+# (vm_nodes, virtualbmc ports, libvirt_uri, networks).
+dependencies:
+  - common
--- /dev/null
+# Dispatch on virtbmc_action (defaults/main.yml, default "setup").
+- include_tasks: setup_tasks.yml
+  when: virtbmc_action == "setup"
+- include_tasks: teardown_tasks.yml
+  when: virtbmc_action == "teardown"
--- /dev/null
+---
+
+# Configure virtualbmc and register one virtual BMC per vm_nodes entry
+# so ironic can power-manage the fake baremetal VMs over IPMI.
+- name: Create VirtualBMC directories
+  file:
+    path: "{{ item }}"
+    state: directory
+    mode: 0750
+    owner: root
+    group: root
+  with_items:
+    - "/etc/virtualbmc"
+    - "/var/log/virtualbmc"
+  become: true
+
+- name: Create VirtualBMC configuration file
+  copy:
+    mode: 0750
+    dest: "/etc/virtualbmc/virtualbmc.conf"
+    content: |
+      [default]
+      config_dir=/root/.vbmc
+      [log]
+      logfile=/var/log/virtualbmc/virtualbmc.log
+      debug=True
+      [ipmi]
+      session_timeout=20
+  become: true
+
+- name: get virthost non_root_user userid
+  command: id -u {{ non_root_user }}
+  register: non_root_user_uid
+
+- name: set fact on non_root_user_uid
+  set_fact:
+    non_root_user_uid: "{{ non_root_user_uid.stdout }}"
+
+# The first network defined with an address will be used for vbmc access.
+- name: set vbmc address if there is a (nat) network defined with an address
+  set_fact:
+    vbmc_address: "{{ networks|selectattr('address', 'defined')|map(attribute='address')|list|first }}"
+  when: networks|selectattr('address', 'defined')|map(attribute='name')|list|length > 0
+
+# The connection uri is slightly different when using qemu:///system
+# and requires the root user.
+- name: set qemu uri for qemu:///system usage
+  set_fact:
+    vbmc_libvirt_uri: "qemu+ssh://root@{{ vbmc_address }}/system?&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
+  when: libvirt_uri == "qemu:///system"
+
+- name: set qemu uri for qemu:///session usage
+  set_fact:
+    vbmc_libvirt_uri: "qemu+ssh://{{ non_root_user }}@{{ vbmc_address }}/session?socket=/run/user/{{ non_root_user_uid }}/libvirt/libvirt-sock&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
+  when: vbmc_libvirt_uri is not defined
+
+- name: Start the Virtual BMCs (virtualbmc >= 1.4.0+) on Redhat-family OSes
+  service:
+    name: "virtualbmc"
+    state: started
+    enabled: true
+  when:
+    - ansible_os_family == "RedHat"
+  become: true
+
+# Ubuntu has no virtualbmc service unit; launch the daemon directly and
+# tolerate it already running.
+- name: Start the Virtual BMCs (virtualbmc >= 1.4.0+) on ubuntu
+  shell: vbmcd || true
+  when:
+    - ansible_facts['distribution'] == "Ubuntu"
+  become: true
+
+
+- name: Create the Virtual BMCs
+  command: "vbmc add {{ item.name }} --port {{ item.virtualbmc_port }} --libvirt-uri {{ vbmc_libvirt_uri }}"
+  args:
+    creates: /root/.vbmc/{{ item.name }}/config
+  with_items: "{{ vm_nodes }}"
+  become: true
+  environment:
+    PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
+
+- name: Start the Virtual BMCs
+  script: vbmc_start.sh {{ item.name }}
+  with_items: "{{ vm_nodes }}"
+  become: true
--- /dev/null
+---
+
+# Undo setup_tasks.yml: remove virtualbmc's config/log/state
+# directories and stop the daemon.
+- name: Remove virtualbmc directories
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+    - "/etc/virtualbmc"
+    - "/var/log/virtualbmc"
+    - "/root/.vbmc/"
+  become: true
+
+- name: Stop/disable the Virtual BMCs (virtualbmc >= 1.4.0+) on CentOS
+  when:
+    - ansible_os_family == "RedHat"
+  service:
+    name: "virtualbmc"
+    state: "stopped"
+    enabled: false
+  become: true
+
+# No service unit on Ubuntu; kill the daemon directly and tolerate it
+# not running.
+- name: Stop/disable the Virtual BMCs (virtualbmc >= 1.4.0+) on Ubuntu
+  when:
+    - ansible_distribution == 'Ubuntu'
+  shell: pkill vbmcd || true
+  become: true
--- /dev/null
+---
+# Entry-point playbook: configure libvirt networks/VMs and the virtual
+# BMCs on the local virthost so they can stand in for real baremetal.
+- name: Setup dummy baremetal VMs
+  hosts: virthost
+  connection: local
+  gather_facts: true
+  tasks:
+    - import_role:
+        name: common
+    - import_role:
+        name: libvirt
+    # virtbmc is only needed when actual VMs are created.
+    - import_role:
+        name: virtbmc
+      when: vm_platform|default("libvirt") == "libvirt"
--- /dev/null
+---
+# Entry-point playbook: run the libvirt and virtbmc roles in teardown
+# mode to destroy everything created by the setup playbook.
+- name: Teardown previous libvirt setup
+  hosts: virthost
+  connection: local
+  gather_facts: true
+  tasks:
+    - import_role:
+        name: common
+    - import_role:
+        name: libvirt
+      vars:
+        libvirt_action: "teardown"
+    - import_role:
+        name: virtbmc
+      vars:
+        virtbmc_action: "teardown"
+