Remove unused metal3-vm code 64/4464/7
author Todd Malsbary <todd.malsbary@intel.com>
Thu, 30 Sep 2021 22:44:15 +0000 (15:44 -0700)
committer Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
Tue, 5 Oct 2021 00:01:56 +0000 (00:01 +0000)
Change-Id: Ibc69ef89b832ef24d088fd8b6412f332bbf8cdbb
Signed-off-by: Todd Malsbary <todd.malsbary@intel.com>
45 files changed:
Makefile
README.md
ci/jjb/shell/vagrant-verify.sh
cmd/bpa-operator/Makefile
cmd/bpa-operator/e2etest/bpa_vm_verifier.sh [deleted file]
deploy/metal3-vm/.gitignore [deleted file]
deploy/metal3-vm/01_install_requirements.sh [deleted file]
deploy/metal3-vm/02_configure_host.sh [deleted file]
deploy/metal3-vm/03_launch_mgmt_cluster.sh [deleted file]
deploy/metal3-vm/04_verify.sh [deleted file]
deploy/metal3-vm/05_host_cleanup.sh [deleted file]
deploy/metal3-vm/Makefile [deleted file]
deploy/metal3-vm/config_example.sh [deleted file]
deploy/metal3-vm/disable_apparmor_driver_libvirtd.sh [deleted file]
deploy/metal3-vm/lib/common.sh [deleted file]
deploy/metal3-vm/lib/logging.sh [deleted file]
deploy/metal3-vm/ubuntu_bridge_network_configuration.sh [deleted file]
deploy/metal3-vm/ubuntu_install_requirements.sh [deleted file]
deploy/metal3-vm/vm-setup/inventory.ini [deleted file]
deploy/metal3-vm/vm-setup/library/generate_macs.py [deleted file]
deploy/metal3-vm/vm-setup/roles/common/defaults/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/common/tasks/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/defaults/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/files/get-domain-ip.sh [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/meta/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/templates/ironic_nodes.json.j2 [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/templates/network.xml.j2 [deleted file]
deploy/metal3-vm/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/defaults/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/files/vbmc_start.sh [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/meta/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/main.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/setup_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml [deleted file]
deploy/metal3-vm/vm-setup/setup-playbook.yml [deleted file]
deploy/metal3-vm/vm-setup/teardown-playbook.yml [deleted file]
figure-2.odg
figure-2.png

diff --git a/Makefile b/Makefile
index 2f21037..84ff14b 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,6 @@ SHELL:=/bin/bash
 ENV:=$(CURDIR)/env
 BMDIR:=$(CURDIR)/env/metal3
 METAL3DIR:=$(CURDIR)/deploy/metal3/scripts
-METAL3VMDIR:=$(CURDIR)/deploy/metal3-vm
 BPA_OPERATOR:=$(CURDIR)/cmd/bpa-operator/
 KUD_PATH:=$(CURDIR)/deploy/kud
 SDWAN_VERIFIER_PATH:=$(CURDIR)/sdwan/test
@@ -74,12 +73,6 @@ kud_vm_deploy:
 kud_bm_reset:
        pushd $(KUD_PATH) && ./kud_bm_launch.sh reset v1 && popd
 
-metal3_prerequisite:
-       pushd $(METAL3VMDIR) && make bmh_install && popd
-
-metal3_vm:
-       pushd $(METAL3VMDIR) && make bmh && popd
-
 sdwan_verifier:
        pushd $(SDWAN_VERIFIER_PATH) && bash sdwan_verifier.sh && popd
 
@@ -92,9 +85,6 @@ bpa_op_install_e2e:
 bpa_op_delete:
        pushd $(BPA_OPERATOR) && make delete && popd
 
-bpa_op_e2e_vm:
-       pushd $(BPA_OPERATOR) && make e2etest_vm && popd
-
 bpa_op_e2e_bmh:
        pushd $(BPA_OPERATOR) && make e2etest_bmh && popd
 
@@ -104,8 +94,6 @@ bpa_op_e2e_virtletvm:
 bpa_op_unit:
        pushd $(BPA_OPERATOR) && make unit_test && popd
 
-bpa_op_vm_verifier: bpa_op_install_e2e bpa_op_e2e_vm
-
 bpa_op_bmh_verifier: bpa_op_install_e2e bpa_op_e2e_bmh
 
 bpa_op_virtletvm_verifier: bpa_op_install bpa_op_e2e_virtletvm
@@ -142,14 +130,7 @@ bm_verifer: package_prerequisite \
        bpa_rest_api_verifier \
        clean_all
 
-verify_all: prerequisite \
-       metal3_prerequisite \
-       kud_bm_deploy_mini \
-       metal3_vm \
-       bpa_op_vm_verifier \
-       bpa_rest_api_verifier
-
-verifier: verify_all
+verifier: bm_verifer
 
 verify_nestedk8s: prerequisite \
        kud_vm_deploy \
@@ -168,4 +149,3 @@ kud_bm_verifier: prerequisite \
        clean_bm_packages
 
 .PHONY: all bm_preinstall bm_install bashate
-
diff --git a/README.md b/README.md
index 070bd60..242156b 100644
--- a/README.md
+++ b/README.md
@@ -381,9 +381,8 @@ The following steps occur once the `make install` command is given.
 ![Figure 2](figure-2.png)*Figure 2: Virtual Deployment Architecture*
 
 Virtual deployment is used for the development environment using
-Metal3 virtual deployment to create VM with PXE boot. VM Ansible
-scripts the node inventory file in /opt/ironic. No setting is required
-from the user to deploy the virtual deployment.
+Vagrant to create VMs with PXE boot. No configuration is required
+from the user to deploy the virtual deployment.
 
 ### Snapshot Deployment Overview
 No snapshot is implemented in ICN R2.
@@ -395,11 +394,20 @@ Jump server is required to be installed with Ubuntu 18.04. This will
 install all the VMs and install the k8s clusters.
 
 #### Verifying the Setup - VMs
-`make verify_all` installs two VMs with name master-0 and worker-0
-with 8GB RAM and 8 vCPUs and installs k8s cluster on the VMs using the
-ICN BPA operator and install the ICN BPA REST API verifier. BPA
-operator installs the multi-cluster KUD to bring up k8s with all
-addons and plugins.
+To verify the virtual deployment, execute the following commands:
+``` shell
+$ vagrant up --no-parallel
+$ vagrant ssh jump
+vagrant@jump:~$ sudo su
+root@jump:/home/vagrant# cd /icn
+root@jump:/icn# make verifier
+```
+`vagrant up --no-parallel` creates three VMs: vm-jump, vm-machine-1,
+and vm-machine-2, each with 16GB RAM and 8 vCPUs. `make verifier`
+installs the ICN BPA operator and the ICN BPA REST API verifier into
+vm-jump, and then installs a k8s cluster on the vm-machine VMs using
+the ICN BPA operator. The BPA operator installs the multi-cluster KUD
+to bring up k8s with all addons and plugins.
 
 # Verifying the Setup
 ICN blueprint checks all the setup in both bare metal and VM
diff --git a/ci/jjb/shell/vagrant-verify.sh b/ci/jjb/shell/vagrant-verify.sh
index 65148aa..40aeea1 100644
@@ -11,13 +11,9 @@ function clean_vm {{
 trap clean_vm EXIT
 
 vagrant destroy -f
-vagrant up
+vagrant up --no-parallel
 vagrant ssh -c "
 set -exuf
-sudo parted -a optimal /dev/sda ---pretend-input-tty resizepart 3 yes 100%
-sudo resize2fs /dev/sda3
-sudo apt update
-sudo apt install -y make
-cd /vagrant
+cd /icn
 sudo su -c 'make {target}'
 "
diff --git a/cmd/bpa-operator/Makefile b/cmd/bpa-operator/Makefile
index f075b9a..528f58c 100644
@@ -57,10 +57,6 @@ delete:
 unit_test:
        go test ./pkg/controller/provisioning/
 
-.PHONY: e2etest_vm
-e2etest_vm:
-       ./e2etest/bpa_vm_verifier.sh
-
 .PHONY: e2etest_bmh
 e2etest_bmh:
        ./e2etest/bpa_bmh_verifier.sh
diff --git a/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_vm_verifier.sh
deleted file mode 100755
index cdb4e5c..0000000
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-CLUSTER_NAME=cluster-test
-ADDONS_NAMESPACE=kud
-NUM_MASTERS=${NUM_MASTERS:-"1"}
-NUM_WORKERS=${NUM_WORKERS:-"1"}
-
-# Create provisioning CR file for testing
-cat <<EOF > e2etest/e2e_test_provisioning_cr.yaml
-apiVersion: bpa.akraino.org/v1alpha1
-kind: Provisioning
-metadata:
-  name: e2e-test-provisioning
-  labels:
-    cluster: ${CLUSTER_NAME}
-    owner: c1
-spec:
-  masters:
-EOF
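-# Query libvirt for each master VM's provisioning-network MAC address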
-for ((master=0;master<NUM_MASTERS;++master)); do
-    mac=$(virsh domiflist "master_${master}" | awk '/provisioning/ {print $5}')
-    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
-    - master-${master}:
-        mac-address: ${mac}
-EOF
-done
-cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
-  workers:
-EOF
-for ((worker=0;worker<NUM_WORKERS;++worker)); do
-    mac=$(virsh domiflist "worker_${worker}" | awk '/provisioning/ {print $5}')
-    cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
-    - worker-${worker}:
-        mac-address: ${mac}
-EOF
-done
-cat <<EOF >> e2etest/e2e_test_provisioning_cr.yaml
-  KUDPlugins:
-    - emco
-EOF
-kubectl apply -f e2etest/e2e_test_provisioning_cr.yaml
-sleep 5
-
-#Check Status of kud job pod
-status="Running"
-
-while [[ $status == "Running" ]]
-do
-    echo "KUD install job still running"
-    sleep 2m
-    stats=$(kubectl get pods |grep -i kud-${CLUSTER_NAME})
-    status=$(echo $stats | cut -d " " -f 3)
-done
-
-#Print logs of Job Pod
-jobPod=$(kubectl get pods|grep kud-${CLUSTER_NAME})
-podName=$(echo $jobPod | cut -d " " -f 1)
-printf "\nNow Printing Job pod logs\n"
-kubectl logs $podName
-
-if [[ $status == "Completed" ]];
-then
-   printf "KUD Install Job completed\n"
-   printf "Checking cluster status\n"
-
-   source ../../env/lib/common.sh
-   CLUSTER_KUBECONFIG=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
-   APISERVER=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl config view --minify -o jsonpath='{.clusters[0].cluster.server}')
-   TOKEN=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get secret $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode)
-   if ! call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure;
-   then
-       printf "\nKubernetes Cluster Install did not complete successfully\n"
-       exit 1
-   else
-       printf "\nKubernetes Cluster Install was successful\n"
-   fi
-
-else
-    printf "KUD Install Job failed\n"
-    exit 1
-fi
-
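-# Retry the given condition command every 30s for up to 10 minutes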
-function wait_for {
-    local -r interval=30
-    for ((try=0;try<600;try+=${interval})); do
-        echo "$(date +%H:%M:%S) - Waiting for $*"
-        sleep ${interval}s
-        if $*; then return 0; fi
-    done
-    return 1
-}
-
-function emco_ready {
-    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n emco wait pod --all --for=condition=Ready --timeout=0s 1>/dev/null 2>/dev/null
-}
-
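-# The emcoctl_* wrappers parse the "Response Code:" lines printed by
-# emcoctl.sh: apply/instantiate/terminate succeed on any 2xx response,
-# delete succeeds once the resource is gone (404).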
-function emcoctl_apply {
-    [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply -f $@ -v values.yaml |
-             awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]
-}
-
-function emcoctl_delete {
-    [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh delete -f $@ -v values.yaml |
-             awk '/Response Code:/ {code=$3} END{print code}') =~ 404 ]]
-}
-
-function emcoctl_instantiate {
-    [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply projects/kud/composite-apps/$@/v1/deployment-intent-groups/deployment/instantiate |
-             awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]
-}
-
-function emcoctl_terminate {
-    [[ $(/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/emcoctl.sh apply projects/kud/composite-apps/$@/v1/deployment-intent-groups/deployment/terminate |
-             awk '/Response Code:/ {code=$3} END{print code}') =~ 2.. ]]
-}
-
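-# Wait until the emco pods are Ready, then retry the requested emcoctl operation until it succeeds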
-function emcoctl {
-    local -r op=$1
-    shift
-
-    local -r interval=2
-    for ((try=0;try<600;try+=${interval})); do
-        if emco_ready; then break; fi
-        echo "$(date +%H:%M:%S) - Waiting for emco"
-        sleep ${interval}s
-    done
-
-    for ((;try<600;try+=${interval})); do
-       case ${op} in
-           "apply") if emcoctl_apply $@; then return 0; fi ;;
-           "delete") if emcoctl_delete $@; then return 0; fi ;;
-           "instantiate") if emcoctl_instantiate $@; then return 0; fi ;;
-           "terminate") if emcoctl_terminate $@; then return 0; fi ;;
-       esac
-        echo "$(date +%H:%M:%S) - Waiting for emcoctl ${op} $@"
-        sleep ${interval}s
-    done
-
-    return 1
-}
-
-function addons_instantiated {
-    KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} wait pod -l app.kubernetes.io/instance=r1 --for=condition=Ready --timeout=0s 1>/dev/null 2>/dev/null
-}
-
-function addons_terminated {
-    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get pod -l app.kubernetes.io/instance=r1 --no-headers 2>/dev/null | wc -l) == 0 ]]
-}
-
-function networks_instantiated {
-    local -r count=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate --no-headers 2>/dev/null | wc -l)
-    local -r succeeded=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get sriovnetworknodestate -o jsonpath='{range .items[*]}{.status.syncStatus}{"\n"}{end}' 2>/dev/null | grep "Succeeded" | wc -l)
-    [[ $count == $succeeded ]]
-}
-
-function networks_terminated {
-    # The syncStatus will be the same whether we are instantiating or terminating an SR-IOV network
-    networks_instantiated
-}
-
-function kubevirt_instantiated {
-    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]]
-    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' 2>/dev/null | grep "Deployed" | wc -l) == 1 ]]
-}
-
-function kubevirt_terminated {
-    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get kubevirt --no-headers 2>/dev/null | wc -l) == 0 ]]
-    [[ $(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl -n ${ADDONS_NAMESPACE} get cdi --no-headers 2>/dev/null | wc -l) == 0 ]]
-}
-
-#Apply addons
-printf "Applying KUD addons\n"
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl apply 00-controllers.yaml
-emcoctl apply 01-cluster.yaml
-emcoctl apply 02-project.yaml
-emcoctl apply 03-addons-app.yaml
-popd
-
-#Instantiate addons
-emcoctl instantiate addons
-wait_for addons_instantiated
-emcoctl instantiate networks
-wait_for networks_instantiated
-emcoctl instantiate kubevirt
-wait_for kubevirt_instantiated
-
-#Test addons
-printf "Testing KUD addons\n"
-pushd /opt/kud/multi-cluster/addons/tests
-failed_kud_tests=""
-container_runtime=$(KUBECONFIG=${CLUSTER_KUBECONFIG} kubectl get nodes -o jsonpath='{.items[].status.nodeInfo.containerRuntimeVersion}')
-if [[ "${container_runtime}" == "containerd://1.2.13" ]]; then
-    #With containerd 1.2.13, the qat test container image fails to unpack.
-    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network cmk"
-else
-    kud_tests="topology-manager-sriov kubevirt multus ovn4nfv nfd sriov-network qat cmk"
-fi
-for test in ${kud_tests}; do
-    KUBECONFIG=${CLUSTER_KUBECONFIG} bash ${test}.sh || failed_kud_tests="${failed_kud_tests} ${test}"
-done
-KUBECONFIG=${CLUSTER_KUBECONFIG} DEMO_FOLDER=${PWD} PATH=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts:${PATH} bash plugin_fw_v2.sh --external || failed_kud_tests="${failed_kud_tests} plugin_fw_v2"
-if [[ ! -z "$failed_kud_tests" ]]; then
-    printf "Test cases failed:${failed_kud_tests}\n"
-    exit 1
-fi
-popd
-printf "All test cases passed\n"
-
-#Tear down setup
-printf "\n\nBeginning BMH E2E Test Teardown\n\n"
-emcoctl terminate kubevirt
-wait_for kubevirt_terminated
-emcoctl terminate networks
-wait_for networks_terminated
-emcoctl terminate addons
-wait_for addons_terminated
-pushd /opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/addons
-emcoctl delete 03-addons-app.yaml
-emcoctl delete 02-project.yaml
-emcoctl delete 01-cluster.yaml
-emcoctl delete 00-controllers.yaml
-popd
-kubectl delete -f e2etest/e2e_test_provisioning_cr.yaml
-kubectl delete job kud-${CLUSTER_NAME}
-kubectl delete --ignore-not-found=true configmap ${CLUSTER_NAME}-configmap
-rm e2etest/e2e_test_provisioning_cr.yaml
-rm -rf /opt/kud/multi-cluster/${CLUSTER_NAME}
-rm -rf /opt/kud/multi-cluster/addons
-make delete
diff --git a/deploy/metal3-vm/.gitignore b/deploy/metal3-vm/.gitignore
deleted file mode 100644
index 5484a3b..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-logs
-ironic.env
-config_*.sh
-!config_example.sh
-master-*.json
-worker-*.json
-master-*.yaml
-worker-*.yaml
-*.bk
-*.tmp
-*.swp
diff --git a/deploy/metal3-vm/01_install_requirements.sh b/deploy/metal3-vm/01_install_requirements.sh
deleted file mode 100755
index ca9ce6c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"')
-if [[ $OS == ubuntu ]]; then
-  # shellcheck disable=SC1091
-  source ubuntu_install_requirements.sh
-else
-  # shellcheck disable=SC1091
-  source centos_install_requirements.sh
-fi
diff --git a/deploy/metal3-vm/02_configure_host.sh b/deploy/metal3-vm/02_configure_host.sh
deleted file mode 100755
index c316522..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-# shellcheck disable=SC1091
-source lib/logging.sh
-# shellcheck disable=SC1091
-source lib/common.sh
-
-# Generate user ssh key
-if [ ! -f "$HOME/.ssh/id_rsa.pub" ]; then
-    ssh-keygen -f ~/.ssh/id_rsa -P ""
-fi
-
-# root needs a private key to talk to libvirt
-# See tripleo-quickstart-config/roles/virtbmc/tasks/configure-vbmc.yml
-if sudo [ ! -f /root/.ssh/id_rsa_virt_power ]; then
-  sudo ssh-keygen -f /root/.ssh/id_rsa_virt_power -P ""
-  sudo cat /root/.ssh/id_rsa_virt_power.pub | sudo tee -a /root/.ssh/authorized_keys
-fi
-
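-# Create the master/worker VMs, virtual BMCs, and libvirt networks with Ansible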
-ANSIBLE_FORCE_COLOR=true ansible-playbook \
-    -e "working_dir=$WORKING_DIR" \
-    -e "num_masters=$NUM_MASTERS" \
-    -e "num_workers=$NUM_WORKERS" \
-    -e "extradisks=$VM_EXTRADISKS" \
-    -e "virthost=$HOSTNAME" \
-    -e "platform=$NODES_PLATFORM" \
-    -e "manage_baremetal=$MANAGE_BR_BRIDGE" \
-    -i vm-setup/inventory.ini \
-    -b -vvv vm-setup/setup-playbook.yml
-
-# Allow local non-root-user access to libvirt
-# Restart libvirtd service to get the new group membership loaded
-if ! id "$USER" | grep -q libvirt; then
-  sudo usermod -a -G "libvirt" "$USER"
-  sudo systemctl restart libvirtd
-fi
-# Usually virt-manager/virt-install creates this: https://www.redhat.com/archives/libvir-list/2008-August/msg00179.html
-if ! virsh pool-uuid default > /dev/null 2>&1 ; then
-    virsh pool-define /dev/stdin <<EOF
-<pool type='dir'>
-  <name>default</name>
-  <target>
-    <path>/var/lib/libvirt/images</path>
-  </target>
-</pool>
-EOF
-    virsh pool-start default
-    virsh pool-autostart default
-fi
-
-if [[ $OS == ubuntu ]]; then
-  # source ubuntu_bridge_network_configuration.sh
-  # shellcheck disable=SC1091
-  source ubuntu_bridge_network_configuration.sh
-  # shellcheck disable=SC1091
-  source disable_apparmor_driver_libvirtd.sh
-else
-  if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
-      # Adding an IP address in the libvirt definition for this network results in
-      # dnsmasq being run, we don't want that as we have our own dnsmasq, so set
-      # the IP address here
-      if [ ! -e /etc/sysconfig/network-scripts/ifcfg-provisioning ] ; then
-          echo -e "DEVICE=provisioning\nTYPE=Bridge\nONBOOT=yes\nNM_CONTROLLED=no\nBOOTPROTO=static\nIPADDR=172.22.0.1\nNETMASK=255.255.255.0" | sudo dd of=/etc/sysconfig/network-scripts/ifcfg-provisioning
-      fi
-      sudo ip link set dev provisioning down || true
-      sudo ip link set dev provisioning up
-
-      # Need to pass the provision interface for bare metal
-      if [ "$PRO_IF" ]; then
-          echo -e "DEVICE=$PRO_IF\nTYPE=Ethernet\nONBOOT=yes\nNM_CONTROLLED=no\nBRIDGE=provisioning" | sudo dd of="/etc/sysconfig/network-scripts/ifcfg-$PRO_IF"
-          sudo ip link set dev "$PRO_IF" down || true
-          sudo ip link set dev "$PRO_IF" up
-      fi
-  fi
-
-  if [ "$MANAGE_INT_BRIDGE" == "y" ]; then
-      # Create the baremetal bridge
-      if [ ! -e /etc/sysconfig/network-scripts/ifcfg-baremetal ] ; then
-          echo -e "DEVICE=baremetal\nTYPE=Bridge\nONBOOT=yes\nNM_CONTROLLED=no" | sudo dd of=/etc/sysconfig/network-scripts/ifcfg-baremetal
-      fi
-      sudo ip link set dev baremetal down || true
-      sudo ip link set dev baremetal up
-
-      # Add the internal interface to it if requested, this may also be the interface providing
-      # external access so we need to make sure we maintain dhcp config if it's available
-      if [ "$INT_IF" ]; then
-          echo -e "DEVICE=$INT_IF\nTYPE=Ethernet\nONBOOT=yes\nNM_CONTROLLED=no\nBRIDGE=baremetal" | sudo dd of="/etc/sysconfig/network-scripts/ifcfg-$INT_IF"
-          if sudo nmap --script broadcast-dhcp-discover -e "$INT_IF" | grep "IP Offered" ; then
-              echo -e "\nBOOTPROTO=dhcp\n" | sudo tee -a /etc/sysconfig/network-scripts/ifcfg-baremetal
-              sudo systemctl restart network
-          else
-             sudo systemctl restart network
-          fi
-      fi
-  fi
-
-  # restart the libvirt network so it applies an ip to the bridge
-  if [ "$MANAGE_BR_BRIDGE" == "y" ] ; then
-      sudo virsh net-destroy baremetal
-      sudo virsh net-start baremetal
-      if [ "$INT_IF" ]; then #Need to bring UP the NIC after destroying the libvirt network
-          sudo ip link set dev "$INT_IF" up
-      fi
-  fi
-fi
-
-# Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
-for port in 80 5050 6385 ; do
-    if ! sudo iptables -C INPUT -i provisioning -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
-        sudo iptables -I INPUT -i provisioning -p tcp -m tcp --dport $port -j ACCEPT
-    fi
-done
-
-# Allow ipmi to the virtual bmc processes that we just started
-if ! sudo iptables -C INPUT -i baremetal -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
-    sudo iptables -I INPUT -i baremetal -p udp -m udp --dport 6230:6235 -j ACCEPT
-fi
-
-#Allow access to dhcp and tftp server for pxeboot
-for port in 67 69 ; do
-    if ! sudo iptables -C INPUT -i provisioning -p udp --dport $port -j ACCEPT 2>/dev/null ; then
-        sudo iptables -I INPUT -i provisioning -p udp --dport $port -j ACCEPT
-    fi
-done
-
-# Need to route traffic from the provisioning host.
-if [ "$EXT_IF" ]; then
-  sudo iptables -t nat -A POSTROUTING --out-interface "$EXT_IF" -j MASQUERADE
-  sudo iptables -A FORWARD --in-interface baremetal -j ACCEPT
-fi
-
-# Switch NetworkManager to internal DNS
-
-if [[ "$MANAGE_BR_BRIDGE" == "y" && $OS == "centos" ]] ; then
-  sudo mkdir -p /etc/NetworkManager/conf.d/
-  sudo crudini --set /etc/NetworkManager/conf.d/dnsmasq.conf main dns dnsmasq
-  if [ "$ADDN_DNS" ] ; then
-    echo "server=$ADDN_DNS" | sudo tee /etc/NetworkManager/dnsmasq.d/upstream.conf
-  fi
-  if systemctl is-active --quiet NetworkManager; then
-    sudo systemctl reload NetworkManager
-  else
-    sudo systemctl restart NetworkManager
-  fi
-fi
-
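-# Remove any ironic containers left over from a previous run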
-for name in ironic ironic-inspector dnsmasq httpd mariadb ipa-downloader; do
-    sudo "${CONTAINER_RUNTIME}" ps | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" kill $name
-    sudo "${CONTAINER_RUNTIME}" ps --all | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" rm $name -f
-done
-rm -rf "$IRONIC_DATA_DIR"
-
-mkdir -p "$IRONIC_DATA_DIR/html/images"
-pushd "$IRONIC_DATA_DIR/html/images"
-BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
-BM_IMAGE_URL=${BM_IMAGE_URL:-"https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"}
-if [ ! -f ${BM_IMAGE} ] ; then
-    curl -o ${BM_IMAGE} --insecure --compressed -O -L ${BM_IMAGE_URL}
-    md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
-fi
-popd
-
-for IMAGE_VAR in IRONIC_IMAGE IRONIC_INSPECTOR_IMAGE IPA_DOWNLOADER_IMAGE; do
-    IMAGE=${!IMAGE_VAR}
-    sudo "${CONTAINER_RUNTIME}" pull "$IMAGE"
-done
-
-# set password for mariadb
-mariadb_password="$(echo "$(date;hostname)"|sha256sum |cut -c-20)"
-
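-# This environment file is shared by all of the ironic containers started below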
-cat <<EOF > ${PWD}/ironic.env
-PROVISIONING_INTERFACE=provisioning
-DHCP_RANGE=172.22.0.10,172.22.0.100
-IPA_BASEURI=https://images.rdoproject.org/train/rdo_trunk/current-tripleo
-DEPLOY_KERNEL_URL=http://172.22.0.1/images/ironic-python-agent.kernel
-DEPLOY_RAMDISK_URL=http://172.22.0.1/images/ironic-python-agent.initramfs
-IRONIC_ENDPOINT=http://172.22.0.1:6385/v1/
-IRONIC_INSPECTOR_ENDPOINT=http://172.22.0.1:5050/v1/
-CACHEURL=http://172.22.0.1/images
-IRONIC_FAST_TRACK=false
-EOF
-
-# Start image downloader container
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name ipa-downloader \
-    --env-file "${PWD}/ironic.env" \
-    -v "$IRONIC_DATA_DIR:/shared" "${IPA_DOWNLOADER_IMAGE}" /usr/local/bin/get-resource.sh
-
-sudo "${CONTAINER_RUNTIME}" wait ipa-downloader
-
-if [ ! -e "$IRONIC_DATA_DIR/html/images/ironic-python-agent.kernel" ] ||
-   [ ! -e "$IRONIC_DATA_DIR/html/images/ironic-python-agent.initramfs" ]; then
-    echo "Failed to get ironic-python-agent"
-    exit 1
-fi
-
-# Start dnsmasq, http, mariadb, and ironic containers using same image
-# See this file for env vars you can set, like IP, DHCP_RANGE, INTERFACE
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name dnsmasq \
-    --env-file "${PWD}/ironic.env" \
-    -v "$IRONIC_DATA_DIR:/shared" --entrypoint /bin/rundnsmasq "${IRONIC_IMAGE}"
-
-# For available env vars, see:
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name httpd \
-    --env-file "${PWD}/ironic.env" \
-    -v "$IRONIC_DATA_DIR:/shared" --entrypoint /bin/runhttpd "${IRONIC_IMAGE}"
-
-# https://github.com/metal3-io/ironic/blob/master/runmariadb.sh
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name mariadb \
-    --env-file "${PWD}/ironic.env" \
-    -v "$IRONIC_DATA_DIR:/shared" --entrypoint /bin/runmariadb \
-    --env "MARIADB_PASSWORD=$mariadb_password" "${IRONIC_IMAGE}"
-
-# See this file for additional env vars you may want to pass, like IP and INTERFACE
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name ironic \
-    --env-file "${PWD}/ironic.env" \
-    --env "MARIADB_PASSWORD=$mariadb_password" \
-    -v "$IRONIC_DATA_DIR:/shared" "${IRONIC_IMAGE}"
-
-# Start Ironic Inspector
-sudo "${CONTAINER_RUNTIME}" run -d --net host --privileged --name ironic-inspector \
-    --env-file "${PWD}/ironic.env" \
-    -v "$IRONIC_DATA_DIR:/shared" "${IRONIC_INSPECTOR_IMAGE}"
diff --git a/deploy/metal3-vm/03_launch_mgmt_cluster.sh b/deploy/metal3-vm/03_launch_mgmt_cluster.sh
deleted file mode 100755
index b72a30b..0000000
+++ /dev/null
@@ -1,180 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-# shellcheck disable=SC1091
-source lib/logging.sh
-# shellcheck disable=SC1091
-source lib/common.sh
-
-eval "$(go env)"
-export GOPATH
-DEPLOYDIR="$(dirname "$PWD")"
-BMODIR=$DEPLOYDIR/metal3/scripts/bmo
-
-# Environment variables
-# M3PATH : Path to clone the metal3 dev env repo
-# BMOPATH : Path to clone the baremetal operator repo
-#
-# BMOREPO : Baremetal operator repository URL
-# BMOBRANCH : Baremetal operator repository branch to checkout
-# FORCE_REPO_UPDATE : discard existing directories
-#
-# BMO_RUN_LOCAL : run the baremetal operator locally (not in Kubernetes cluster)
-
-M3PATH="${GOPATH}/src/github.com/metal3-io"
-BMOPATH="${M3PATH}/baremetal-operator"
-
-BMOREPO="${BMOREPO:-https://github.com/metal3-io/baremetal-operator.git}"
-BMOBRANCH="${BMOBRANCH:-10eb5aa3e614d0fdc6315026ebab061cbae6b929}"
-FORCE_REPO_UPDATE="${FORCE_REPO_UPDATE:-true}"
-
-BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}"
-COMPUTE_NODE_PASSWORD="${COMPUTE_NODE_PASSWORD:-mypasswd}"
-BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
-IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
-IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
-
-function clone_repos {
-    mkdir -p "${M3PATH}"
-    if [[ -d ${BMOPATH} && "${FORCE_REPO_UPDATE}" == "true" ]]; then
-      rm -rf "${BMOPATH}"
-    fi
-    if [ ! -d "${BMOPATH}" ] ; then
-        pushd "${M3PATH}"
-        git clone "${BMOREPO}"
-        popd
-    fi
-    pushd "${BMOPATH}"
-    git checkout "${BMOBRANCH}"
-    git pull -r || true
-    popd
-}
-
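-# Deploy the baremetal operator into the metal3 namespace, configured to use the externally started ironic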
-function launch_baremetal_operator {
-    docker pull $IRONIC_BAREMETAL_IMAGE
-    kubectl apply -f $BMODIR/namespace/namespace.yaml
-    kubectl apply -f $BMODIR/rbac/service_account.yaml -n metal3
-    kubectl apply -f $BMODIR/rbac/role.yaml -n metal3
-    kubectl apply -f $BMODIR/rbac/role_binding.yaml
-    kubectl apply -f $BMODIR/crds/metal3.io_baremetalhosts_crd.yaml
-    kubectl apply -f $BMODIR/operator/no_ironic/operator.yaml -n metal3
-}
-
-# documentation for the values below may be found at
-# https://cloudinit.readthedocs.io/en/latest/topics/modules.html
-function create_userdata {
-    name="$1"
-    COMPUTE_NODE_FQDN="$name.akraino.icn.org"
-    printf "#cloud-config\n" > $name-userdata.yaml
-    if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
-        printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >>  $name-userdata.yaml
-        printf "\nchpasswd: {expire: False}\n" >>  $name-userdata.yaml
-        printf "ssh_pwauth: True\n" >>  $name-userdata.yaml
-    fi
-
-    if [ -n "$COMPUTE_NODE_FQDN" ]; then
-        printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >>  $name-userdata.yaml
-        printf "\n" >>  $name-userdata.yaml
-    fi
-    printf "disable_root: false\n" >> $name-userdata.yaml
-    printf "ssh_authorized_keys:\n  - " >> $name-userdata.yaml
-
-    if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
-        yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
-    fi
-
-    cat $HOME/.ssh/id_rsa.pub >> $name-userdata.yaml
-    cloud_init_scripts >> $name-userdata.yaml
-    printf "\n" >> $name-userdata.yaml
-}
-
-function cloud_init_scripts {
-    # set_dhcp_identifier.sh:
-    #   The IP address assigned to the provisioning NIC will change
-    #   due to IPA using the MAC address as the client ID and systemd
-    #   using a different ID.  Tell systemd to use the MAC as the
-    #   client ID.  We can't do this in the network data as only the
-    #   JSON format is supported by metal3, and the JSON format does
-    #   not support the dhcp-identifier field.
-    cat << 'EOF'
-write_files:
-- path: /var/lib/cloud/scripts/per-instance/set_dhcp_identifier.sh
-  owner: root:root
-  permissions: '0777'
-  content: |
-    #!/usr/bin/env bash
-    set -eux -o pipefail
-    sed -i -e '/dhcp4: true$/!b' -e 'h;s/\S.*/dhcp-identifier: mac/;H;g' /etc/netplan/50-cloud-init.yaml
-    netplan apply
-EOF
-}
-
-function apply_userdata_credential {
-    name="$1"
-    cat <<EOF > ./$name-user-data-credential.yaml
-apiVersion: v1
-data:
-  userData: $(base64 -w 0 $name-userdata.yaml)
-kind: Secret
-metadata:
-  name: $name-user-data
-  namespace: metal3
-type: Opaque
-EOF
-    kubectl apply -n metal3 -f $name-user-data-credential.yaml
-}
-
-function create_networkdata {
-    name="$1"
-    node_networkdata $name > $name-networkdata.json
-}
-
-function apply_networkdata_credential {
-    name="$1"
-    cat <<EOF > ./$name-network-data-credential.yaml
-apiVersion: v1
-data:
-  networkData: $(base64 -w 0 $name-networkdata.json)
-kind: Secret
-metadata:
-  name: $name-network-data
-  namespace: metal3
-type: Opaque
-EOF
-    kubectl apply -n metal3 -f $name-network-data-credential.yaml
-}
-
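-# Read the CSV node list (name,address,user,password,mac) and create a BareMetalHost plus credential secrets for each node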
-function make_bm_hosts {
-    while IFS=',' read -r name address user password mac; do
-        create_userdata $name
-        apply_userdata_credential $name
-        create_networkdata $name
-        apply_networkdata_credential $name
-        GO111MODULE=auto go run "${BMOPATH}"/cmd/make-bm-worker/main.go \
-           -address "$address" \
-           -password "$password" \
-           -user "$user" \
-           -boot-mac "$mac" \
-           "$name" > $name-bm-node.yaml
-        printf "  image:" >> $name-bm-node.yaml
-        printf "\n    url: ""%s" "${IMAGE_URL}" >> $name-bm-node.yaml
-        printf "\n    checksum: ""%s" "${IMAGE_CHECKSUM}" >> $name-bm-node.yaml
-        printf "\n  userData:" >> $name-bm-node.yaml
-        printf "\n    name: ""%s" "$name""-user-data" >> $name-bm-node.yaml
-        printf "\n    namespace: metal3" >> $name-bm-node.yaml
-        printf "\n  networkData:" >> $name-bm-node.yaml
-        printf "\n    name: ""%s" "$name""-network-data" >> $name-bm-node.yaml
-        printf "\n    namespace: metal3" >> $name-bm-node.yaml
-        printf "\n  rootDeviceHints:" >> $name-bm-node.yaml
-        printf "\n    minSizeGigabytes: 48\n" >> $name-bm-node.yaml
-        kubectl apply -f $name-bm-node.yaml -n metal3
-    done
-}
-
-function apply_bm_hosts {
-    list_nodes | make_bm_hosts
-}
-
-clone_repos
-launch_baremetal_operator
-apply_bm_hosts
diff --git a/deploy/metal3-vm/04_verify.sh b/deploy/metal3-vm/04_verify.sh
deleted file mode 100755
index 70fbf22..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-# shellcheck disable=SC1091
-source lib/common.sh
-
-declare -i timeout=30
-declare -i interval=60
-
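-# The exit status is the aggregated host state: 1 when every host is provisioned, so callers negate the result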
-function check_provisioned {
-    declare -i prev_host_state=0
-    declare -i j=0
-    echo "VM state: 1 means provisioned & 0 means not yet provisioned"
-    while IFS=',' read -r name address user password mac; do
-        declare -i current_host_state=0
-        state=$(kubectl get baremetalhosts $name -n metal3 -o json | jq -r '.status.provisioning.state')
-        echo "VM host metal3 state - "$name" : "$state
-
-        if [ $state == "provisioned" ];then
-            current_host_state=1
-        fi
-
-        echo "VM $name current_host_state : "$current_host_state
-        echo "VMs      prev_host_state    : "$prev_host_state
-
-         if [ $j -eq 0 ]; then
-            prev_host_state=$current_host_state
-            ((j+=1))
-            continue
-        fi
-
-        if [ $current_host_state -eq 1 ] && [ $prev_host_state -eq 1 ]; then
-            prev_host_state=1
-        else
-            prev_host_state=0
-        fi
-
-        echo "All VM hosts aggregated state - prev_host_state:"$prev_host_state
-        ((j+=1))
-    done
-    return $prev_host_state
-}
-
-function warm_up_time {
-    echo "Wait for 75s for all VM to reboot and network is up"
-    sleep 75
-}
-
-function wait_for_provisioned {
-    declare -i k=1
-    declare -i t=$timeout
-    while ((t > 0)); do
-        echo "Try $k/$timeout iteration : Wait for $interval seconds to check all bmh state"
-        sleep $interval
-        if ! list_nodes | check_provisioned; then
-            echo "All the VMs are provisioned - success"
-            warm_up_time
-            exit 0
-        fi
-        ((t-=1))
-        ((k+=1))
-    done
-    exit 1
-}
-
-function verify_bm_hosts {
-    wait_for_provisioned
-}
-
-verify_bm_hosts
diff --git a/deploy/metal3-vm/05_host_cleanup.sh b/deploy/metal3-vm/05_host_cleanup.sh
deleted file mode 100755
index 1e69d12..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-# shellcheck disable=SC1091
-source lib/logging.sh
-# shellcheck disable=SC1091
-source lib/common.sh
-
-BMO_RUN_LOCAL="${BMO_RUN_LOCAL:-false}"
-CAPBM_RUN_LOCAL="${CAPBM_RUN_LOCAL:-false}"
-
-# Kill and remove the running ironic containers
-for name in ironic ironic-inspector dnsmasq httpd mariadb ipa-downloader; do
-    sudo "${CONTAINER_RUNTIME}" ps | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" kill $name
-    sudo "${CONTAINER_RUNTIME}" ps --all | grep -w "$name$" && sudo "${CONTAINER_RUNTIME}" rm $name -f
-done
-
-# Kill the locally running operators
-if [ "${BMO_RUN_LOCAL}" = true ]; then
-  kill "$(pgrep "operator-sdk")" 2> /dev/null || true
-fi
-if [ "${CAPBM_RUN_LOCAL}" = true ]; then
-  CAPBM_PARENT_PID="$(pgrep -f "go run ./cmd/manager/main.go")"
-  if [[ "${CAPBM_PARENT_PID}" != "" ]]; then
-    CAPBM_GO_PID="$(pgrep -P "${CAPBM_PARENT_PID}" )"
-    kill "${CAPBM_GO_PID}"  2> /dev/null || true
-  fi
-fi
-
-
-ANSIBLE_FORCE_COLOR=true ansible-playbook \
-    -e "working_dir=$WORKING_DIR" \
-    -e "num_masters=$NUM_MASTERS" \
-    -e "num_workers=$NUM_WORKERS" \
-    -e "extradisks=$VM_EXTRADISKS" \
-    -e "virthost=$HOSTNAME" \
-    -e "manage_baremetal=$MANAGE_BR_BRIDGE" \
-    -i vm-setup/inventory.ini \
-    -b -vvv vm-setup/teardown-playbook.yml
-
-sudo rm -rf /etc/NetworkManager/conf.d/dnsmasq.conf
-# There was a bug in this file, it may need to be recreated.
-if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
-    sudo ip link set dev provisioning down || true
-    sudo rm -f /etc/sysconfig/network-scripts/ifcfg-provisioning || true
-fi
-# Leaving this around causes issues when the host is rebooted
-if [ "$MANAGE_BR_BRIDGE" == "y" ]; then
-    sudo ip link set dev baremetal down || true
-    sudo rm -f /etc/sysconfig/network-scripts/ifcfg-baremetal || true
-fi
-
-rm -rf $WORKING_DIR
-rm -rf $IRONIC_DATA_DIR
diff --git a/deploy/metal3-vm/Makefile b/deploy/metal3-vm/Makefile
deleted file mode 100644
index 29a4ab3..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-all: bmh_install bmh_configure bmh_launch bmh_verify
-
-bmh: bmh_configure bmh_launch bmh_verify
-
-bmh_install:
-       ./01_install_requirements.sh
-
-bmh_configure:
-       ./02_configure_host.sh
-
-bmh_launch:
-       ./03_launch_mgmt_cluster.sh
-
-bmh_verify:
-       ./04_verify.sh
-
-bmh_clean:
-       ./05_host_cleanup.sh
-
-.PHONY: all bmh bmh_install bmh_configure bmh_launch bmh_verify bmh_clean
diff --git a/deploy/metal3-vm/config_example.sh b/deploy/metal3-vm/config_example.sh
deleted file mode 100644
index 9f53d24..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-
-#
-# This is the subnet used on the "baremetal" libvirt network, created as the
-# primary network interface for the virtual bare metalhosts.
-#
-# Default of 192.168.111.0/24 set in lib/common.sh
-#
-#export EXTERNAL_SUBNET="192.168.111.0/24"
-
-#
-# This SSH key will be automatically injected into the provisioned host
-# by the provision_host.sh script.
-#
-# Default of ~/.ssh/id_rsa.pub is set in lib/common.sh
-#
-#export SSH_PUB_KEY=~/.ssh/id_rsa.pub
-
-#
-# Select the Container Runtime, can be "docker" or "containerd"
-# Defaults to "docker"
-#
-#export CONTAINER_RUNTIME="docker"
-
-#
-# Set the Baremetal Operator repository to clone
-#
-#export BMOREPO="${BMOREPO:-https://github.com/metal3-io/baremetal-operator.git}"
-
-#
-# Set the Baremetal Operator branch to checkout
-#
-#export BMOBRANCH="${BMOBRANCH:-master}"
-
-#
-# Force deletion of the BMO and CAPBM repositories before cloning them again
-#
-#export FORCE_REPO_UPDATE="${FORCE_REPO_UPDATE:-false}"
-
-#
-# Run a local baremetal operator instead of deploying in Kubernetes
-#
-#export BMO_RUN_LOCAL=true
diff --git a/deploy/metal3-vm/disable_apparmor_driver_libvirtd.sh b/deploy/metal3-vm/disable_apparmor_driver_libvirtd.sh
deleted file mode 100644
index 5aca6d6..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-selinux="#security_driver = \"selinux\""
-apparmor="security_driver = \"apparmor\""
-none="security_driver = \"none\""
-sudo sed -i "s/$selinux/$none/g" /etc/libvirt/qemu.conf
-sudo sed -i "s/$apparmor/$none/g" /etc/libvirt/qemu.conf
-sudo systemctl restart libvirtd
diff --git a/deploy/metal3-vm/lib/common.sh b/deploy/metal3-vm/lib/common.sh
deleted file mode 100644
index cc48dd6..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-eval "$(go env)"
-
-SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-USER=`whoami`
-
-# Get variables from the config file
-if [ -z "${CONFIG:-}" ]; then
-    # See if there's a config_$USER.sh in the SCRIPTDIR
-    if [ ! -f ${SCRIPTDIR}/../config_${USER}.sh ]; then
-        cp ${SCRIPTDIR}/../config_example.sh ${SCRIPTDIR}/../config_${USER}.sh
-        echo "Automatically created config_${USER}.sh with default contents."
-    fi
-    CONFIG="${SCRIPTDIR}/../config_${USER}.sh"
-fi
-source $CONFIG
-
-# Set variables
-# Additional DNS
-ADDN_DNS=${ADDN_DNS:-}
-# External interface for routing traffic through the host
-EXT_IF=${EXT_IF:-}
-# Provisioning interface
-PRO_IF=${PRO_IF:-}
-# Does libvirt manage the baremetal bridge (including DNS and DHCP)
-MANAGE_BR_BRIDGE=${MANAGE_BR_BRIDGE:-y}
-# Only manage bridges if is set
-MANAGE_PRO_BRIDGE=${MANAGE_PRO_BRIDGE:-y}
-MANAGE_INT_BRIDGE=${MANAGE_INT_BRIDGE:-y}
-# Internal interface, to bridge virbr0
-INT_IF=${INT_IF:-}
-#Container runtime
-CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-"docker"}
-
-export EXTERNAL_SUBNET="192.168.111.0/24"
-#Ironic data directory
-IRONIC_DATA_DIR=${IRONIC_DATA_DIR:-"/opt/ironic"}
-export SSH_PUB_KEY=~/.ssh/id_rsa.pub
-
-FILESYSTEM=${FILESYSTEM:="/"}
-
-WORKING_DIR=${WORKING_DIR:-"/opt/metal3-vm"}
-NODES_FILE=${NODES_FILE:-"${WORKING_DIR}/ironic_nodes.json"}
-NODES_PLATFORM=${NODES_PLATFORM:-"libvirt"}
-
-export NUM_MASTERS=${NUM_MASTERS:-"1"}
-export NUM_WORKERS=${NUM_WORKERS:-"1"}
-export VM_EXTRADISKS=${VM_EXTRADISKS:-"false"}
-
-# Ironic vars
-export IRONIC_IMAGE=${IRONIC_IMAGE:-"integratedcloudnative/ironic:v1.0-icn"}
-export IRONIC_INSPECTOR_IMAGE=${IRONIC_INSPECTOR_IMAGE:-"integratedcloudnative/ironic-inspector:v1.0-icn"}
-export IRONIC_BAREMETAL_IMAGE=${IRONIC_BAREMETAL_IMAGE:-"integratedcloudnative/baremetal-operator:v2.0-icn"}
-export IPA_DOWNLOADER_IMAGE=${IPA_DOWNLOADER_IMAGE:-"integratedcloudnative/ironic-ipa-downloader:v1.0-icn"}
-
-# Verify requisites/permissions
-# Connect to system libvirt
-export LIBVIRT_DEFAULT_URI=qemu:///system
-if [ "$USER" != "root" -a "${XDG_RUNTIME_DIR:-}" == "/run/user/0" ] ; then
-    echo "Please use a non-root user, WITH a login shell (e.g. su - USER)"
-    exit 1
-fi
-
-# Check if sudo privileges without password
-if ! sudo -n uptime &> /dev/null ; then
-  echo "sudo without password is required"
-  exit 1
-fi
-
-# Check OS
-OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"')
-if [[ ! $OS =~ ^(centos|rhel|ubuntu)$ ]]; then
-  echo "Unsupported OS"
-  exit 1
-fi
-
-# Check CentOS version
-os_version=$(awk -F= '/^VERSION_ID=/ { print $2 }' /etc/os-release | tr -d '"' | cut -f1 -d'.')
-if [[ ${os_version} -ne 7 ]] && [[ ${os_version} -ne 18 ]]; then
-  echo "Required CentOS 7 or RHEL 7 or Ubuntu 18.04"
-  exit 1
-fi
-
-# Check d_type support
-FSTYPE=$(df ${FILESYSTEM} --output=fstype | grep -v Type)
-
-case ${FSTYPE} in
-  'ext4'|'btrfs')
-  ;;
-  'xfs')
-    if [[ $(xfs_info ${FILESYSTEM} | grep -q "ftype=1") ]]; then
-      echo "Filesystem not supported"
-      exit 1
-    fi
-  ;;
-  *)
-    echo "Filesystem not supported"
-    exit 1
-  ;;
-esac
-
-if [ ! -d "$WORKING_DIR" ]; then
-  echo "Creating Working Dir"
-  sudo mkdir "$WORKING_DIR"
-  sudo chown "${USER}:${USER}" "$WORKING_DIR"
-  chmod 755 "$WORKING_DIR"
-fi
-
-function list_nodes {
-    # Includes -machine and -machine-namespace
-    cat $NODES_FILE | \
-        jq -r '.nodes[] | [
-           .name,
-           .driver + "://" + .driver_info.ipmi_address + (if .driver_info.ipmi_port then ":" + .driver_info.ipmi_port else "" end),
-           .driver_info.ipmi_username,
-           .driver_info.ipmi_password,
-           .ports[0].address
-           ] | @csv' | \
-        sed 's/"//g'
-}
-
-function node_networkdata {
-    name=$1
-    cat $NODES_FILE  | jq -r --arg name "$name" '.nodes[] | select(.name==$name) | .net'
-}
diff --git a/deploy/metal3-vm/lib/logging.sh b/deploy/metal3-vm/lib/logging.sh
deleted file mode 100644
index e936439..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-# Log output automatically
-LOGDIR="$(dirname $0)/logs"
-if [ ! -d "$LOGDIR" ]; then
-    mkdir -p "$LOGDIR"
-fi
-LOGFILE="$LOGDIR/$(basename $0 .sh)-$(date +%F-%H%M%S).log"
-echo "Logging to $LOGFILE"
-# Set fd 1 and 2 to write to the log file
-exec 1> >( tee "${LOGFILE}" ) 2>&1
diff --git a/deploy/metal3-vm/ubuntu_bridge_network_configuration.sh b/deploy/metal3-vm/ubuntu_bridge_network_configuration.sh
deleted file mode 100755
index 60875cb..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-# shellcheck disable=SC1091
-source lib/logging.sh
-# shellcheck disable=SC1091
-source lib/common.sh
-
-if [ "$MANAGE_PRO_BRIDGE" == "y" ]; then
-     # Adding an IP address in the libvirt definition for this network results in
-     # dnsmasq being run, we don't want that as we have our own dnsmasq, so set
-     # the IP address here
-     sudo ip link add dev provisioning type bridge
-     sudo ip addr add dev provisioning 172.22.0.1/24
-     sudo ip link set provisioning up
-
-     # Need to pass the provision interface for bare metal
-     if [ "$PRO_IF" ]; then
-       sudo ip link set dev "$PRO_IF" master provisioning
-     fi
- fi
-
- if [ "$MANAGE_INT_BRIDGE" == "y" ]; then
-     # Create the baremetal bridge
-     if ! [[  $(ip a show baremetal) ]]; then
-       sudo ip link add dev baremetal type bridge
-       sudo ip addr add dev baremetal 192.168.111.1/24
-       sudo ip link set baremetal up
-     fi
-
-     # Add the internal interface to it if requested, this may also be the interface providing
-     # external access so we need to make sure we maintain dhcp config if it's available
-     if [ "$INT_IF" ]; then
-       sudo ip link set dev "$INT_IF" master baremetal
-     fi
- fi
-
- # restart the libvirt network so it applies an ip to the bridge
- if [ "$MANAGE_BR_BRIDGE" == "y" ] ; then
-     sudo virsh net-destroy baremetal
-     sudo virsh net-start baremetal
-     if [ "$INT_IF" ]; then #Need to bring UP the NIC after destroying the libvirt network
-         sudo ip link set dev "$INT_IF" up
-     fi
- fi
diff --git a/deploy/metal3-vm/ubuntu_install_requirements.sh b/deploy/metal3-vm/ubuntu_install_requirements.sh
deleted file mode 100755
index c39e1d1..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-# shellcheck disable=SC1091
-source lib/logging.sh
-# shellcheck disable=SC1091
-source lib/common.sh
-
-# Update to latest packages first
-sudo apt -y update
-
-# Install required packages
-
-sudo apt -y install \
-  crudini \
-  curl \
-  dnsmasq \
-  figlet \
-  golang \
-  zlib1g-dev \
-  libssl1.0-dev \
-  nmap \
-  patch \
-  psmisc \
-  python3-pip \
-  wget
-
-sudo update-alternatives --install /usr/bin/python python /usr/bin/python3 1
-sudo update-alternatives --install /usr/bin/pip pip /usr/bin/pip3 1
-
-# Install pyenv
-
-if [[  $(cat ~/.bashrc) != *PYENV_ROOT* ]]; then
-  if ! [ -d "$HOME/.pyenv" ] ; then
-     git clone git://github.com/yyuu/pyenv.git ~/.pyenv
-  fi
-  # shellcheck disable=SC2016
-  # shellcheck disable=SC2129
-  echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bashrc
-  # shellcheck disable=SC2016
-  echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bashrc
-  # shellcheck disable=SC2016
-  echo -e 'if command -v pyenv 1>/dev/null 2>&1; then\n  eval "$(pyenv init -)"\nfi' >> ~/.bashrc
-fi
-
-if [[ $PATH != *pyenv* ]]; then
-  export PYENV_ROOT="$HOME/.pyenv"
-  export PATH="$PYENV_ROOT/bin:$PATH"
-  if command -v pyenv 1>/dev/null 2>&1; then
-    eval "$(pyenv init -)"
-  fi
-fi
-
-# There are some packages which are newer in the tripleo repos
-
-# Setup yarn and nodejs repositories
-#sudo curl -sL https://dl.yarnpkg.com/rpm/yarn.repo -o /etc/yum.repos.d/yarn.repo
-curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
-#curl -sL https://rpm.nodesource.com/setup_10.x | sudo bash -
-echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
-
-# Add this repository to install Golang 1.12
-sudo add-apt-repository -y ppa:longsleep/golang-backports
-
-# Update some packages from new repos
-sudo apt -y update
-
-# Make sure additional requirements are installed
-
-##No bind-utils. It is for host, nslookop,..., no need in ubuntu
-sudo apt -y install \
-  jq \
-  libguestfs-tools \
-  nodejs \
-  qemu-kvm \
-  libvirt-bin libvirt-clients libvirt-dev \
-  golang-go \
-  unzip \
-  yarn \
-  genisoimage
-
-# Install python packages not included as rpms
-sudo pip install \
-  ansible==2.8.2 \
-  lolcat \
-  yq \
-  virtualbmc==1.6.0 \
-  python-ironicclient \
-  python-ironic-inspector-client \
-  lxml \
-  netaddr \
-  requests \
-  setuptools \
-  libvirt-python==5.7.0 \
diff --git a/deploy/metal3-vm/vm-setup/inventory.ini b/deploy/metal3-vm/vm-setup/inventory.ini
deleted file mode 100644
index d6d53d4..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-[virthost]
-localhost
diff --git a/deploy/metal3-vm/vm-setup/library/generate_macs.py b/deploy/metal3-vm/vm-setup/library/generate_macs.py
deleted file mode 100644
index d4f09fb..0000000
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/python
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# generate_baremetal_macs method ripped from
-# openstack/tripleo-incubator/scripts/configure-vm
-
-import marshal
-import math
-import random
-
-DOCUMENTATION = '''
----
-module: generate_macs
-version_added: "2.0"
-short_description: Generate a list of Ethernet MAC addresses
-description:
-   - Generate a list of Ethernet MAC addresses suitable for baremetal testing.
-'''
-
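-# MAC addresses are allocated two apart within the final octet, so at most 0xff/2 = 127 can be generated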
-MAX_NUM_MACS = math.trunc(0xff / 2)
-
-
-def generate_baremetal_macs(nodes, networks):
-    """Generate an Ethernet MAC address suitable for baremetal testing."""
-    # NOTE(dprince): We generate our own bare metal MAC address's here
-    # instead of relying on libvirt so that we can ensure the
-    # locally administered bit is set low. (The libvirt default is
-    # to set the 2nd MSB high.) This effectively allows our
-    # fake baremetal VMs to more accurately behave like real hardware
-    # and fixes issues with bridge/DHCP configurations which rely
-    # on the fact that bridges assume the MAC address of the lowest
-    # attached NIC.
-    # MACs generated for a given machine will also be in sequential
-    # order, which matches how most BM machines are laid out as well.
-    # Additionally we increment each MAC by two places.
-    macs = []
-    count = len(nodes) * len(networks)
-
-    if count > MAX_NUM_MACS:
-        raise ValueError("The MAX num of MACS supported is %i  "
-                         "(you specified %i)." % (MAX_NUM_MACS, count))
-
-    # Seed the random number generator with the input so that MAC
-    # generation is idempotent.
-    random.seed(marshal.dumps(nodes + networks))
-
-    base_nums = [0x00,
-                 random.randint(0x00, 0xff),
-                 random.randint(0x00, 0xff),
-                 random.randint(0x00, 0xff),
-                 random.randint(0x00, 0xff)]
-    base_mac = ':'.join(map(lambda x: "%02x" % x, base_nums))
-
-    start = random.randint(0x00, 0xff)
-    if (start + (count * 2)) > 0xff:
-        # leave room to generate macs in sequence
-        start = 0xff - count * 2
-    for num in range(0, count * 2, 2):
-        mac = start + num
-        macs.append(base_mac + ":" + ("%02x" % mac))
-
-    result = {}
-    for node in nodes:
-        result[node['name']] = {}
-        for network in networks:
-            result[node['name']][network['name']] = macs.pop(0)
-
-    return result
-
-
-def main():
-    module = AnsibleModule(
-        argument_spec=dict(
-            nodes=dict(required=True, type='list'),
-            networks=dict(required=True, type='list')
-        )
-    )
-    result = generate_baremetal_macs(module.params["nodes"],
-                                     module.params["networks"])
-    module.exit_json(**result)
-
-# see http://docs.ansible.com/developing_modules.html#common-module-boilerplate
-from ansible.module_utils.basic import AnsibleModule  # noqa
-
-
-if __name__ == '__main__':
-    main()
diff --git a/deploy/metal3-vm/vm-setup/roles/common/defaults/main.yml b/deploy/metal3-vm/vm-setup/roles/common/defaults/main.yml
deleted file mode 100644
index 40751bc..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-non_root_user: "{{ lookup('env', 'USER') }}"
-
-# base domain to use
-cluster_domain: "{{ lookup('env', 'CLUSTER_DOMAIN') | default('ostest.test.metalkube.org', true) }}"
-
-# allow the nic model to be overridden
-libvirt_nic_model: virtio
-
-# These defaults are used if there are no flavor-specific
-# overrides configured.
-default_disk: 50
-default_memory: 16384
-default_vcpu: 8
-num_masters: 1
-num_workers: 1
-extradisks: false
-virtualbmc_base_port: 6230
-flavors:
-  master:
-    memory: '{{master_memory|default(default_memory)}}'
-    disk: '{{master_disk|default(default_disk)}}'
-    vcpu: '{{master_vcpu|default(default_vcpu)}}'
-    extradisks: '{{extradisks|bool}}'
-
-  worker:
-    memory: '{{worker_memory|default(default_memory)}}'
-    disk: '{{worker_disk|default(default_disk)}}'
-    vcpu: '{{worker_vcpu|default(default_vcpu)}}'
-    extradisks: '{{extradisks|bool}}'
-
-# An optional prefix for node names
-ironic_prefix: ""
-
-baremetal_network_cidr: "{{ lookup('env', 'EXTERNAL_SUBNET') | default('192.168.111.0/24', true) }}"
-
-# Set this to `false` if you don't want your vms
-# to have a VNC console available.
-enable_vnc_console: true
-
-# Path for volume storage
-libvirt_volume_path: "{{ working_dir }}/pool"
-
-# These ensure we're using privileged virt, so VMs persist over reboot
-libvirt_uri: qemu:///system
-ssh_user: root
diff --git a/deploy/metal3-vm/vm-setup/roles/common/tasks/main.yml b/deploy/metal3-vm/vm-setup/roles/common/tasks/main.yml
deleted file mode 100644
index 05c4dca..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
----
-
-- set_fact:
-    generate_vm_nodes: "{{vm_nodes is not defined}}"
-
-- name: Define vm_nodes if not already defined
-  when: generate_vm_nodes
-  block:
-    - name: Generate vm_nodes for "{{num_masters}}" masters
-      set_fact:
-        vm_nodes: "{{vm_nodes|default([]) + [
-                     {'name': ironic_prefix + 'master_%s'|format(item),
-                      'flavor': 'master',
-                      'virtualbmc_port': virtualbmc_base_port+item}]}}"
-      loop: "{{ range(0, num_masters|int)|list }}"
-    
-    - name: Generate vm_nodes for "{{num_workers}}" workers
-      set_fact:
-        vm_nodes: "{{vm_nodes|default([]) + [
-                     {'name': ironic_prefix + 'worker_%s'|format(item),
-                      'flavor': 'worker',
-                      'virtualbmc_port': virtualbmc_base_port+num_masters|int+item} ]}}"
-      loop: "{{ range(0, num_workers|int)|list }}"
-
-# Describe our virtual networks.  These networks will be attached to
-# the vm nodes in the order in which they are defined with the following caveats:
-#   *  The first bridge network defined will be used for pxe booting
-- set_fact:
-    generate_networks: "{{networks is not defined}}"
-- name: Define networks when not already defined
-  when: generate_networks
-  block:
-    - name: Generate dhcp entries on baremetal network for "{{num_masters}}" masters
-      set_fact:
-        dhcp_hosts: "{{dhcp_hosts|default([]) + [
-                       {'name': 'master-%s'|format(item),
-                        'ip': baremetal_network_cidr|nthhost(20+item)|string}]}}"
-      loop: "{{ range(0, num_masters|int)|list }}"
-    
-    - name: Generate dhcp entries on baremetal network for "{{num_workers}}" workers
-      set_fact:
-        dhcp_hosts: "{{dhcp_hosts|default([]) + [
-                       {'name': 'worker-%s'|format(item),
-                        'ip': baremetal_network_cidr|nthhost(20+num_masters|int+item)|string} ]}}"
-      loop: "{{ range(0, num_workers|int)|list }}"
-    
-    - name: Set fact for networks
-      set_fact:
-        networks:
-          - name: provisioning
-            bridge: provisioning
-            forward_mode: bridge
-          - name: baremetal
-            bridge: baremetal
-            forward_mode: "{% if manage_baremetal == 'y' %}nat{% else %}bridge{% endif %}"
-            address: "{{ baremetal_network_cidr|nthhost(1) }}"
-            netmask: "{{ baremetal_network_cidr|ipaddr('netmask') }}"
-            dhcp_range:
-              - "{{ baremetal_network_cidr|nthhost(20) }}"
-              - "{{ baremetal_network_cidr|nthhost(60) }}"
-            dhcp_hosts: "{{dhcp_hosts}}"
-            nat_port_range:
-              - 1024
-              - 65535
-            domain: "{{ cluster_domain }}"
-            dns:
-              hosts: "{{dns_extrahosts | default([])}}"
-              forwarders:
-                - domain: "apps.{{ cluster_domain }}"
-                  addr: "127.0.0.1"
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/defaults/main.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/defaults/main.yml
deleted file mode 100644 (file)
index 3e477b0..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-# When libvirt_action==teardown we destroy the existing configuration
-libvirt_action: setup
-
-# For some baremetal testing we set this to "baremetal" so that only the
-# libvirt networking is configured, not the nodes
-vm_platform: libvirt
-
-# Which libvirt session should we use?  Using `qemu:///session` does
-# not require privileged access (but does require the setup performed by the
-# `environment/setup` role).
-libvirt_volume_pool: oooq_pool
-libvirt_domain_type: kvm
-libvirt_diskdev: sda
-libvirt_diskbus: scsi
-libvirt_arch: x86_64
-libvirt_cpu_mode: host-model
-
-# how many disks should be created when using extradisks
-extradisks_list:
-  - vdb
-
-# size of the disks to create when using extradisks
-extradisks_size: 8G
-
-# The packages required to set up our desired libvirt environment.
-# (Tested on Centos 7)
-libvirt_packages:
-  - qemu-kvm
-  - libvirt
-  - libvirt-python
-  - libguestfs-tools
-  - python-lxml
-  - polkit-pkla-compat
-  - python-netaddr
-  - python2-virtualbmc
-
-# We expect virtualbmc to already be installed on rhel8 as a pre-req to running this,
-# as there's no rhel package available yet.
-libvirt_packages_rhel8:
-  - qemu-kvm
-  - libvirt
-  - python3-libvirt
-  - libguestfs-tools
-  - python3-lxml
-  - polkit-pkla-compat
-  - python3-netaddr
-
-ubuntu_libvirt_packages:
-  - qemu-kvm
-  - libvirt-bin
-  - libvirt-clients
-  - libvirt-dev
-  - python3-libvirt
-  - libguestfs-tools
-  - python3-lxml
-  - gir1.2-polkit-1.0
-  - libpolkit-agent-1-0
-  - libpolkit-backend-1-0
-  - libpolkit-gobject-1-0
-
-
-# The name of the libvirt service.
-libvirtd_service: libvirtd
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/files/get-domain-ip.sh b/deploy/metal3-vm/vm-setup/roles/libvirt/files/get-domain-ip.sh
deleted file mode 100644 (file)
index ad0fd61..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-set -eu -o pipefail
-
-# This script will attempt to get the IP address of a given libvirt guest.
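-#
-# Usage (domain name illustrative):
-#   ./get-domain-ip.sh kud_master_0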
-
-PATH=$PATH:/usr/sbin:/sbin
-
-VMNAME=$1
-
-# Get the MAC address of the first interface by looking for the
-# `<mac address...` line.  Yes, we're parsing XML with awk.  It's probably
-# safe (because the XML is coming from libvirt, so we can be reasonably
-# confident that the formatting will remain the same).
-mac=$(virsh dumpxml "$VMNAME" | awk -F "'" '/mac address/ { print $2; exit }')
-
-# Look up the MAC address in the ARP table.
-ip=$(ip neigh | grep "$mac" | awk '{print $1;}')
-
-if [ -z "$ip" ]; then
-    echo "vm ip is not available" >&2
-    exit 1
-fi
-
-echo "$ip"
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/meta/main.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/meta/main.yml
deleted file mode 100644 (file)
index 9711b33..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
-  - role: common
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/install_setup_tasks.yml
deleted file mode 100644 (file)
index 8e89b42..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-# Install the packages required for our desired libvirt environment.
-# We store the list of packages in `libvirt_packages` so that in
-# theory we can support multiple distributions simply by passing in a
-# different list of packages.
-- name: Install packages for libvirt
-  package:
-    name: "{{ libvirt_packages }}"
-    state: present
-  become: true
-  when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int == 7
-
-- name: Install packages for libvirt
-  package:
-    name: "{{ libvirt_packages_rhel8 }}"
-    state: present
-  become: true
-  when: ansible_os_family == "RedHat" and ansible_lsb.major_release|int == 8
-
-- name: Install packages for libvirt on Ubuntu
-  when: 
-    - ansible_facts['distribution'] == "Ubuntu"
-  package:
-    name: "{{ ubuntu_libvirt_packages }}"
-    state: present
-  become: true
-
-- name: Start libvirtd
-  service:
-    name: "{{ libvirtd_service }}"
-    state: started
-    enabled: true
-  become: true
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/main.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/main.yml
deleted file mode 100644 (file)
index 2a7f2d7..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-- name: libvirt role setup tasks
-  block:
-    - include_tasks: install_setup_tasks.yml
-    - include_tasks: network_setup_tasks.yml
-    - include_tasks: vm_setup_tasks.yml
-      when: vm_platform == "libvirt"
-  when: libvirt_action == "setup"
-
-- name: libvirt role teardown tasks
-  block:
-    - include_tasks: network_teardown_tasks.yml
-    - include_tasks: vm_teardown_tasks.yml
-  when: libvirt_action == "teardown"
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_setup_tasks.yml
deleted file mode 100644 (file)
index e527b70..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-# If virtualport_type is defined for any networks, include OVS dependencies
-- when: networks|selectattr('virtualport_type', 'defined')|map(attribute='name')|list|length > 0
-  block:
-
-  # Install OVS dependencies
-  - name: Install OVS dependencies
-    include_role:
-      name: 'parts/ovs'
-
-  # Create any OVS Bridges that have been defined
-  - name: Create OVS Bridges
-    openvswitch_bridge:
-      bridge: "{{ item.bridge }}"
-      state: present
-    when: item.virtualport_type is defined and item.virtualport_type == "openvswitch"
-    with_items: "{{ networks }}"
-    become: true
-
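-# A network that would take the OVS path above might look like this
-# (illustrative values):
-#   networks:
-#     - name: external
-#       bridge: br-ex
-#       virtualport_type: openvswitch
-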
-# TODO(apuimedo) drop this back to vm tasks once we have proper DNS
-- name: get a list of MACs to use
-  generate_macs:
-    nodes: "{{ vm_nodes }}"
-    networks: "{{ networks }}"
-  register: node_mac_map
-  when: vm_nodes
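-
-# node_mac_map maps node name -> network name -> MAC address, e.g.
-# (illustrative): {"kud_master_0": {"provisioning": "00:5c:52:31:3a:9c",
-# "baremetal": "00:5c:52:31:3b:9c"}}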
-
-
-# Create the global, root-managed libvirt networks to which we will
-# attach the undercloud and vm virtual machines.
-- name: Create libvirt networks
-  virt_net:
-    command: define
-    state: present
-    name: "{{ item.name }}"
-    xml: '{{ lookup("template", "network.xml.j2") }}'
-  with_items: "{{ networks }}"
-  become: true
-
-- name: Start libvirt networks
-  virt_net:
-    command: start
-    name: "{{ item.name }}"
-    state: active
-  with_items: "{{ networks }}"
-  become: true
-
-- name: Mark libvirt networks as autostarted
-  virt_net:
-    name: "{{ item.name }}"
-    autostart: "yes"
-  with_items: "{{ networks }}"
-  become: true
-  register: net_autostart
-  ignore_errors: true
-
-# https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676
-# There is a bug w/ virt_net and RHEL where the network xml
-# file is not written to /etc/libvirt/qemu/networks/ This causes
-# network to be considered transient.
-- when: not net_autostart.changed
-  block:
-
-    - name: Check if "virsh net-autostart" was successful
-      debug: msg="Some libvirt networks were not set to autostart. Please see
-             https://bugs.launchpad.net/tripleo-quickstart/+bug/1581676"
-
-    # get the network xml from the running network
-    - name: Get libvirt networks xml
-      virt_net:
-        command: get_xml
-        name: "{{ item.name }}"
-      with_items: "{{ networks }}"
-      register: net_xml
-      become: true
-
-    # copy the xml to a file
-    - name: copy network-xml to file
-      copy:
-        content: "{{ item.get_xml }}"
-        dest: "/tmp/network-{{ item.item.name }}.xml"
-      with_items: "{{ net_xml.results }}"
-      become: true
-
-    # redefine the network w/ virsh, this will write the xml file to
-    # /etc/libvirt/qemu/networks/ and it will no longer be transient
-    - name: redefine the libvirt networks so the config is written to /etc/libvirt
-      command: virsh net-define /tmp/network-{{ item.name }}.xml
-      with_items: "{{ networks }}"
-      become: true
-
-    # Now we're ready to mark the network autostart
-    - name: Mark libvirt networks as autostarted
-      virt_net:
-        name: "{{ item.name }}"
-        autostart: "yes"
-      with_items: "{{ networks }}"
-      become: true
-
-# Whitelist the bridges associated with these networks for
-# access using qemu [helper networking][helper].  Later on we
-# create virtual machines using an unprivileged `qemu:///session`
-# connection, and we connect to the networks using the bridge names.
-#
-# [helper]: http://wiki.qemu.org/Features-Done/HelperNetworking
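-#
-# With the default networks the resulting bridge.conf contains lines
-# like (illustrative):
-#   allow provisioning
-#   allow baremetal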
-- name: Whitelist bridges for unprivileged access on CentOS
-  lineinfile:
-    dest: '/etc/qemu-kvm/bridge.conf' # Needs to be /etc/qemu/bridge.conf if supporting Fedora
-    line: "allow {{ item.bridge }}"
-  with_items: "{{ networks }}"
-  when: 
-    - ansible_os_family == "RedHat"
-  become: true
-
-- name: Whitelist bridges for unprivileged access on Ubuntu
-  lineinfile:
-    dest: '/etc/qemu/bridge.conf' 
-    line: "allow {{ item.bridge }}"
-    create: yes
-  with_items: "{{ networks }}"
-  when: 
-    - ansible_facts['distribution'] == "Ubuntu"
-  become: true
-
-# We're going to want to store things in `working_dir` so ensure it
-# exists first.  `working_dir` is a directory on the target host.
-- name: Ensure remote working dir exists
-  file:
-    path: "{{ working_dir }}"
-    state: directory
-  become: true
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/network_teardown_tasks.yml
deleted file mode 100644 (file)
index ab77e35..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-- name: Stop libvirt networks
-  virt_net:
-    command: destroy
-    name: "{{ item.name }}"
-    state: inactive
-  with_items: "{{ networks }}"
-  become: true
-
-- name: Delete libvirt networks
-  virt_net:
-    command: undefine
-    state: absent
-    name: "{{ item.name }}"
-  with_items: "{{ networks }}"
-  become: true
-
-- name: Delete bridges on Ubuntu
-  shell: |
-     ip link set baremetal down
-     ip link set provisioning down
-     ip link del baremetal type bridge || true
-     ip link del provisioning type bridge || true
-  when:
-    - ansible_distribution == 'Ubuntu'
-  become: yes
-
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_setup_tasks.yml
deleted file mode 100644 (file)
index 323aa3c..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-# Create a libvirt volume pool.  This is where we'll be creating
-# images for the VMs
-# Note: the virt_pool module is not working properly on rhel-7.2
-# https://bugs.launchpad.net/tripleo-quickstart/+bug/1597905
-- name: ensure libvirt volume path exists
-  become: true
-  file:
-    path: "{{ libvirt_volume_path }}"
-    state: directory
-    mode: 0755
-
-- name: Check volume pool
-  command: >
-    virsh pool-uuid "{{ libvirt_volume_pool }}"
-  register: pool_check
-  ignore_errors: true
-  changed_when: false
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-
-- name: create the volume pool xml file
-  template:
-    src: volume_pool.xml.j2
-    dest: "{{ working_dir }}/volume_pool.xml"
-  when: pool_check is failed
-
-- name: Define volume pool
-  command: "virsh pool-define {{ working_dir }}/volume_pool.xml"
-  when: pool_check is failed
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-
-- name: Start volume pool
-  virt_pool:
-    command: start
-    state: active
-    name: "{{ libvirt_volume_pool }}"
-    uri: "{{ libvirt_uri }}"
-
-# In some cases the pool_check can pass and the pool xml config is absent
-# In this case it is required to dump the xml and redefine the pool.
-- name: ensure tripleo-quickstart volume pool is defined
-  shell: >
-    virsh pool-dumpxml {{ libvirt_volume_pool }} |
-    virsh pool-define /dev/stdin
-  changed_when: true
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-
-- name: Mark volume pool for autostart
-  virt_pool:
-    name: "{{ libvirt_volume_pool }}"
-    autostart: "yes"
-    uri: "{{ libvirt_uri }}"
-
-- when: vm_nodes
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-  block:
-
-    # Create libvirt volumes for the vm hosts.
-    - name: Check if vm volumes exist
-      command: >
-        virsh vol-info --pool '{{ libvirt_volume_pool }}' '{{ item.name }}.qcow2'
-      register: vm_vol_check
-      ignore_errors: true
-      with_items: "{{ vm_nodes }}"
-
-    - name: Create vm storage
-      command: >
-        virsh vol-create-as '{{ libvirt_volume_pool }}'
-        '{{ item.item.name }}'.qcow2 '{{ flavors[item.item.flavor].disk }}'G
-        --format qcow2
-      when:
-        - item is failed
-      with_items: "{{ vm_vol_check.results }}"
-
-    # Define (but do not start) the vm nodes.  These will be
-    # booted later by ironic during the provisioning process.
-    - name: Define vm nodes
-      virt:
-        name: "{{ item.name }}"
-        command: define
-        xml: "{{ lookup('template', 'baremetalvm.xml.j2') }}"
-        uri: "{{ libvirt_uri }}"
-      with_items: "{{ vm_nodes }}"
-
-    # Create additional blockdevices for each objectstorage flavor node
-    # These are sparse files, not using space if unused
-    - name: Create additional blockdevice for objectstorage nodes
-      command: >
-        dd if=/dev/zero of={{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img bs=1 count=0 seek={{ extradisks_size }}
-      when: flavors[item[0].flavor].extradisks|default(false)
-      with_nested:
-        - "{{ vm_nodes }}"
-        - "{{ extradisks_list }}"
-
-    - name: Check if additional blockdevices are attached
-      command: >
-        virsh domblkinfo {{ item[0].name }} {{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img
-      when: flavors[item[0].flavor].extradisks|default(false)
-      changed_when: false
-      ignore_errors: true
-      register: vm_extradisks_check
-      with_nested:
-        - "{{ vm_nodes }}"
-        - "{{ extradisks_list }}"
-
-    - name: Attach additional blockdevices to vm objectstorage VMs
-      command: >
-        virsh attach-disk --config {{ item.item[0].name }} {{ libvirt_volume_path }}/{{ item.item[0].name }}_{{ item.item[1] }}.img {{ item.item[1] }}
-      when: item is failed
-      with_items: "{{ vm_extradisks_check.results }}"
-
-# Generate the ironic node inventory files.  Note that this
-# task *must* occur after the above vm tasks, because if
-# `vm_nodes` is defined the template depends on the
-# `node_mac_map` variable.
-- name: Write ironic node json files
-  template:
-    src: ../templates/ironic_nodes.json.j2
-    dest: "{{ working_dir }}/ironic_nodes.json"
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml b/deploy/metal3-vm/vm-setup/roles/libvirt/tasks/vm_teardown_tasks.yml
deleted file mode 100644 (file)
index d4745e7..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-# NB: We use `virsh` here instead of the `virt` module because
-# these tasks may be called before the dependencies of the `virt`
-# module are satisfied.
-
-- name: Check if libvirt is available
-  command: >
-    virsh uri
-  ignore_errors: true
-  changed_when: false
-  register: libvirt_check
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-
-# If libvirt isn't available we can skip everything else.
-- when: libvirt_check is success
-  environment:
-    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
-  block:
-
-    - when: vm_nodes
-      block:
-
-        # Check if the vm nodes exist.
-        - name: Check vm nodes
-          command: >
-            virsh domid "{{ item.name }}"
-          with_items: "{{ vm_nodes }}"
-          ignore_errors: true
-          register: vm_check
-
-        # Destroy and undefine the vm nodes.
-        - name: Destroy vm nodes
-          command:
-            virsh destroy "{{ item.item.name }}"
-          when: item is success
-          with_items: "{{ vm_check.results }}"
-          ignore_errors: true
-
-        - name: Undefine vm nodes
-          command:
-            virsh undefine "{{ item.item.name }}"
-          when: item is success
-          with_items: "{{ vm_check.results }}"
-
-        # The `virsh vol-dumpxml ... > /dev/null` is here (and elsewhere) due to
-        # [1293804].
-        #
-        # [1293804]: https://bugzilla.redhat.com/show_bug.cgi?id=1293804
-        - name: Delete baremetal vm storage
-          shell: |
-            virsh vol-dumpxml --pool '{{ libvirt_volume_pool }}' \
-              '{{ item.name }}'.qcow2 2>&1 > /dev/null
-            virsh vol-delete --pool '{{ libvirt_volume_pool }}' \
-              '{{ item.name }}'.qcow2
-          with_items: "{{ vm_nodes }}"
-          ignore_errors: true
-
-    - name: Check volume pool
-      command: >
-        virsh pool-uuid "{{ libvirt_volume_pool }}"
-      register: pool_check
-      ignore_errors: true
-
-    # See https://www.redhat.com/archives/libvirt-users/2016-March/msg00123.html
-    # TL;DR: ensure that the pool really exists if the previous
-    # task says it does.
-    - name: Work around libvirt bug
-      shell: |
-        virsh pool-dumpxml "{{ libvirt_volume_pool }}" |
-        virsh pool-define /dev/stdin
-      when: pool_check is success
-
-    - name: Destroy volume pool
-      command: >
-        virsh pool-destroy "{{ libvirt_volume_pool }}"
-      when: pool_check is success
-      ignore_errors: true
-
-    - name: Undefine volume pool
-      command: >
-        virsh pool-undefine "{{ libvirt_volume_pool }}"
-      when: pool_check is success
-
-    - name: Get UID of pool user
-      command: id -u "{{ ansible_user_id }}"
-      register: pool_uid
-      changed_when: false
-      when: pool_check is success
-
-    - name: Destroy pool definition file
-      file:
-        path: "/run/user/{{ pool_uid.stdout }}/libvirt/storage/run/{{ libvirt_volume_pool }}.xml"
-        state: absent
-      when: pool_check is success
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2 b/deploy/metal3-vm/vm-setup/roles/libvirt/templates/baremetalvm.xml.j2
deleted file mode 100644 (file)
index 0841885..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-<domain type='{{ libvirt_domain_type }}'>
-  <name>{{ item.name }}</name>
-  <memory unit='MiB'>{{ flavors[item.flavor].memory }}</memory>
-  <vcpu>{{ flavors[item.flavor].vcpu }}</vcpu>
-
-  {{baremetal_vm_xml|default('')}}
-
-  <os>
-    <type arch='{{ libvirt_arch }}'>hvm</type>
-    <boot dev='network'/>
-    <bootmenu enable='no'/>
-  </os>
-  <features>
-    <acpi/>
-    <apic/>
-    <pae/>
-  </features>
-  <cpu mode='host-model'/>
-  <clock offset='utc'/>
-  <on_poweroff>destroy</on_poweroff>
-  <on_reboot>restart</on_reboot>
-  <on_crash>restart</on_crash>
-  <devices>
-    <disk type='volume' device='disk'>
-      <driver name='qemu' type='qcow2' cache='unsafe'/>
-      <source pool='{{ libvirt_volume_pool }}' volume='{{ item.name }}.qcow2'/>
-      <target dev='{{ libvirt_diskdev }}' bus='{{ libvirt_diskbus }}'/>
-    </disk>
-{% if libvirt_diskbus == 'scsi' %}
-  <controller type='scsi' model='virtio-scsi' />
-{% endif %}
-{% for network in networks %}
-    <interface type='bridge'>
-      <mac address='{{ node_mac_map.get(item.name).get(network.name) }}'/>
-      <source bridge='{{ network.bridge }}'/>
-      <model type='{{ libvirt_nic_model }}'/>
-{% if network.virtualport_type is defined %}
-      <virtualport type='{{ network.virtualport_type }}'/>
-{% endif %}
-    </interface>
-{% endfor %}
-    <serial type='pty'/>
-    <console type='pty'/>
-
-{% if enable_vnc_console|bool %}
-    <input type='mouse' bus='ps2'/>
-    <graphics type='vnc' port='-1' autoport='yes'/>
-    <video>
-      <model type='cirrus' vram='9216' heads='1'/>
-    </video>
-{% endif %}
-
-    {{baremetal_vm_device_xml|default('')}}
-
-  </devices>
-</domain>
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/templates/ironic_nodes.json.j2 b/deploy/metal3-vm/vm-setup/roles/libvirt/templates/ironic_nodes.json.j2
deleted file mode 100644 (file)
index 021f0cd..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-{% set lvars = { 'host_ip' : '192.168.122.1', 'pxe_network' : False} %}
-{% for network in networks %}
-{% if (not (network.forward_mode is defined and network.forward_mode == 'nat') and lvars['pxe_network'] == False) %}
-{% if lvars.update({'pxe_network' : network.name}) %}{% endif %}
-{% endif %}
-{% if network.address is defined and lvars['host_ip'] == '192.168.122.1' %}
-{% if lvars.update({'host_ip' : network.address}) %}{% endif %}
-{% endif %}
-{% endfor %}
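-{# Illustrative: with the default networks, pxe_network resolves to
-   "provisioning" (the first network whose forward_mode is not 'nat') and
-   host_ip to the baremetal network address, e.g. 192.168.111.1. #}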
-{
-  "nodes": [
-  {% for node in vm_nodes %}
-    {
-      "name": "{{ node.name|replace('_', '-') }}",
-      "driver": "ipmi",
-      "resource_class": "baremetal",
-      "driver_info": {
-        "ipmi_username": "admin",
-        "ipmi_password": "password",
-        "ipmi_address": "{{ lvars['host_ip'] }}",
-        "ipmi_port": "{{ node.virtualbmc_port }}",
-        "deploy_kernel": "http://172.22.0.1/images/ironic-python-agent.kernel",
-        "deploy_ramdisk": "http://172.22.0.1/images/ironic-python-agent.initramfs"
-      },
-      "ports": [{
-        "address": "{{ node_mac_map.get(node.name).get(lvars['pxe_network']) }}",
-        "pxe_enabled": true
-      }],
-      "properties": {
-        "local_gb": "{{ flavors[node.flavor].disk }}",
-        "cpu_arch": "{{ libvirt_arch }}"
-      },
-      "net": {
-        "links": [
-       {% for network in networks %}
-          {
-            "id": "{{ network.name }}_nic",
-            "ethernet_mac_address": "{{ node_mac_map.get(node.name).get(network.name) }}",
-            "type": "phy"
-          }{% if not loop.last %},{% endif %}
-       {% endfor %}
-        ],
-        "networks": [
-       {% for network in networks %}
-          {
-            "id": "{{ network.name }}",
-            "link": "{{ network.name }}_nic",
-            "type": "ipv4_dhcp"
-          }{% if not loop.last %},{% endif %}
-       {% endfor %}
-        ],
-        "services": []
-      }
-    }{% if not loop.last %},{% endif %}
-  {% endfor %}
-  ]
-}
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/templates/network.xml.j2 b/deploy/metal3-vm/vm-setup/roles/libvirt/templates/network.xml.j2
deleted file mode 100644 (file)
index 7ec1dc4..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-{% set nat_port_range = item.nat_port_range|default([1024, 65535]) %}
-{% set netmask = item.netmask|default('255.255.255.0') %}
-{% if item.dhcp_hosts is defined %}
-{% set dhcp_hosts_names = item.dhcp_hosts | map(attribute='name') | map('replace', '-', '_') | list %}
-{% endif %}
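-{# dhcp_hosts_names rewrites names like "master-0" to "master_0" so the
-   node_mac_map lookup below, keyed by ironic_prefix plus the underscore
-   form of the name, finds each host's MAC (illustrative). #}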
-<network>
-  <name>{{ item.name }}</name>
-  <bridge name='{{ item.bridge }}'/>
-{% if item.forward_mode is defined %}
-  <forward mode='{{ item.forward_mode }}'>
-{% if item.forward_mode == 'nat' %}
-    <nat>
-      <port start='{{ nat_port_range[0] }}' end='{{ nat_port_range[1] }}' />
-    </nat>
-{% endif %}
-  </forward>
-{% endif %}
-{% if item.virtualport_type is defined %}
-      <virtualport type='{{ item.virtualport_type }}'/>
-{% endif %}
-{% if item.address is defined  and item.forward_mode != 'bridge' %}
-  <ip address='{{ item.address }}' netmask='{{ netmask }}'>
-{% if item.dhcp_range is defined %}
-    <dhcp>
-      <range start='{{ item.dhcp_range[0] }}' end='{{ item.dhcp_range[1] }}'/>
-  {% if item.dhcp_hosts is defined %}
-    {% for host in item.dhcp_hosts %}
-      <host mac='{{ node_mac_map.get(ironic_prefix + dhcp_hosts_names[loop.index0]).get(item.name) }}' name='{{ host.name }}' ip='{{ host.ip }}'/>
-    {% endfor %}
-  {% endif %}
-    </dhcp>
-{% endif %}
-  </ip>
-{% if item.domain is defined %}
-  <domain name='{{ item.domain }}' localOnly='yes'/>
-{% endif %}
-{% if item.dns is defined %}
-  <dns>
-  {% for host in item.dns.hosts %}
-    <host ip='{{ host.ip }}'>
-    {% for name in host.hostnames %}
-      <hostname>{{ name }}</hostname>
-    {% endfor %}
-    </host>
-  {% endfor %}
-  {% if item.dns.srvs is defined %}
-    {% for srv in item.dns.srvs %}
-    <srv service='{{ srv.name }}' protocol='{{ srv.protocol }}' domain='{{ srv.domain }}' port='{{ srv.port }}' target='{{ srv.target }}' />
-    {% endfor %}
-  {% endif %}
-  {% if item.dns.forwarders is defined %}
-    {% for forwarder in item.dns.forwarders %}
-    <forwarder domain='{{ forwarder.domain }}' addr='{{ forwarder.addr }}' />
-    {% endfor %}
-  {% endif %}
-  </dns>
-{% endif %}
-{% endif %}
-</network>
diff --git a/deploy/metal3-vm/vm-setup/roles/libvirt/templates/volume_pool.xml.j2 b/deploy/metal3-vm/vm-setup/roles/libvirt/templates/volume_pool.xml.j2
deleted file mode 100644 (file)
index eb19810..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-<pool type='dir'>
-  <name>{{ libvirt_volume_pool }}</name>
-  <target>
-    <path>{{ libvirt_volume_path }}</path>
-    <permissions>
-      <mode>0755</mode>
-      <owner>-1</owner>
-      <group>-1</group>
-    </permissions>
-  </target>
-</pool>
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/defaults/main.yml b/deploy/metal3-vm/vm-setup/roles/virtbmc/defaults/main.yml
deleted file mode 100644 (file)
index 75ce168..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# Can be set to "teardown" to destroy a previous configuration
-virtbmc_action: setup
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/files/vbmc_start.sh b/deploy/metal3-vm/vm-setup/roles/virtbmc/files/vbmc_start.sh
deleted file mode 100755 (executable)
index 651a9b0..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-set -eux -o pipefail
-
-name="$1"
-
-export PATH=$PATH:/usr/local/bin
-
-status=$(vbmc show -f value "$name" | grep status | cut -f2 -d' ')
-
-if [[ $status != "running" ]]; then
-    vbmc start $name
-fi
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/meta/main.yml b/deploy/metal3-vm/vm-setup/roles/virtbmc/meta/main.yml
deleted file mode 100644 (file)
index 2083f0e..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
----
-dependencies:
-  - common
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/main.yml b/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/main.yml
deleted file mode 100644 (file)
index 3e35782..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-- include_tasks: setup_tasks.yml
-  when: virtbmc_action == "setup"
-- include_tasks: teardown_tasks.yml
-  when: virtbmc_action == "teardown"
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/setup_tasks.yml b/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/setup_tasks.yml
deleted file mode 100644 (file)
index 31d18f7..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
----
-
-- name: Create VirtualBMC directories
-  file:
-    path: "{{ item }}"
-    state: directory
-    mode: 0750
-    owner: root
-    group: root
-  with_items:
-    - "/etc/virtualbmc"
-    - "/var/log/virtualbmc"
-  become: true
-
-- name: Create VirtualBMC configuration file
-  copy:
-    mode: 0750
-    dest: "/etc/virtualbmc/virtualbmc.conf"
-    content: |
-      [default]
-      config_dir=/root/.vbmc
-      [log]
-      logfile=/var/log/virtualbmc/virtualbmc.log
-      debug=True
-      [ipmi]
-      session_timeout=20
-  become: true
-
-- name: get virthost non_root_user userid
-  command: id -u {{ non_root_user }}
-  register: non_root_user_uid
-
-- name: set fact on non_root_user_uid
-  set_fact:
-    non_root_user_uid: "{{ non_root_user_uid.stdout }}"
-
-# The first network defined with an address will be used for vbmc access.
-- name: set vbmc address if there is a (nat) network defined with an address
-  set_fact:
-    vbmc_address: "{{ networks|selectattr('address', 'defined')|map(attribute='address')|list|first }}"
-  when: networks|selectattr('address', 'defined')|map(attribute='name')|list|length > 0
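-
-# e.g. with the default baremetal network (192.168.111.0/24, forward_mode
-# nat), vbmc_address resolves to 192.168.111.1 (illustrative).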
-
-# The connection uri is slightly different when using qemu:///system
-# and requires the root user.
-- name: set qemu uri for qemu:///system usage
-  set_fact:
-    vbmc_libvirt_uri: "qemu+ssh://root@{{ vbmc_address }}/system?&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
-  when: libvirt_uri == "qemu:///system"
-
-- name: set qemu uri for qemu:///session usage
-  set_fact:
-    vbmc_libvirt_uri: "qemu+ssh://{{ non_root_user }}@{{ vbmc_address }}/session?socket=/run/user/{{ non_root_user_uid }}/libvirt/libvirt-sock&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
-  when: vbmc_libvirt_uri is not defined
-
-- name: Start the Virtual BMCs (virtualbmc >= 1.4.0) on RedHat-family OSes
-  service:
-    name: "virtualbmc"
-    state: started
-    enabled: true
-  when:
-    - ansible_os_family == "RedHat"
-  become: true
-
-- name: Start the Virtual BMCs (virtualbmc >= 1.4.0) on Ubuntu
-  shell: vbmcd || true
-  when:
-    - ansible_facts['distribution'] == "Ubuntu"
-  become: true
-
-
-- name: Create the Virtual BMCs
-  command: "vbmc add {{ item.name }} --port {{ item.virtualbmc_port }} --libvirt-uri {{ vbmc_libvirt_uri }}"
-  args:
-    creates: /root/.vbmc/{{ item.name }}/config
-  with_items: "{{ vm_nodes }}"
-  become: true
-  environment:
-    PATH: "{{ ansible_env.PATH }}:/usr/local/bin"
-
-- name: Start the Virtual BMCs
-  script: vbmc_start.sh {{ item.name }}
-  with_items: "{{ vm_nodes }}"
-  become: true
diff --git a/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml b/deploy/metal3-vm/vm-setup/roles/virtbmc/tasks/teardown_tasks.yml
deleted file mode 100644 (file)
index 64068e9..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
----
-
-- name: Remove virtualbmc directories
-  file:
-    path: "{{ item }}"
-    state: absent
-  with_items:
-    - "/etc/virtualbmc"
-    - "/var/log/virtualbmc"
-    - "/root/.vbmc/"
-  become: true
-
-- name: Stop/disable the Virtual BMCs (virtualbmc >= 1.4.0) on RedHat-family OSes
-  when:
-    - ansible_os_family == "RedHat"
-  service:
-    name: "virtualbmc"
-    state: "stopped"
-    enabled: false
-  become: true
-
-- name: Stop/disable the Virtual BMCs (virtualbmc >= 1.4.0) on Ubuntu
-  when:
-    - ansible_distribution == 'Ubuntu'
-  shell: pkill vbmcd || true
-  become: true
diff --git a/deploy/metal3-vm/vm-setup/setup-playbook.yml b/deploy/metal3-vm/vm-setup/setup-playbook.yml
deleted file mode 100644 (file)
index fa942f2..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Set up dummy baremetal VMs
-  hosts: virthost
-  connection: local
-  gather_facts: true
-  tasks:
-    - import_role:
-        name: common
-    - import_role:
-        name: libvirt
-    - import_role:
-        name: virtbmc
-      when: vm_platform|default("libvirt") == "libvirt"
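-
-# Typically run against the local virthost, roughly (illustrative):
-#   ansible-playbook -i vm-setup/inventory.ini vm-setup/setup-playbook.yml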
diff --git a/deploy/metal3-vm/vm-setup/teardown-playbook.yml b/deploy/metal3-vm/vm-setup/teardown-playbook.yml
deleted file mode 100644 (file)
index 571df12..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Tear down previous libvirt setup
-  hosts: virthost
-  connection: local
-  gather_facts: true
-  tasks:
-    - import_role:
-        name: common
-    - import_role:
-        name: libvirt
-      vars:
-        libvirt_action: "teardown"
-    - import_role:
-        name: virtbmc
-      vars:
-        virtbmc_action: "teardown"
-
index 990f174..ac4b9b6 100644 (file)
Binary files a/figure-2.odg and b/figure-2.odg differ
index a252874..6952583 100644 (file)
Binary files a/figure-2.png and b/figure-2.png differ