+diff --git a/ansible-role-requirements.yml b/ansible-role-requirements.yml
+index b2328960..e42ee418 100644
+--- a/ansible-role-requirements.yml
++++ b/ansible-role-requirements.yml
+@@ -190,7 +190,3 @@
+ scm: git
+ src: https://github.com/opendaylight/integration-packaging-ansible-opendaylight
+ version: 4aabce0605ef0f51eef4d6564cc7d779630706c5
+-- name: haproxy_endpoints
+- scm: git
+- src: https://github.com/logan2211/ansible-haproxy-endpoints
+- version: 49901861b16b8afaa9bccdbc649ac956610ff22b
+diff --git a/etc/openstack_deploy/env.d/baremetal.yml b/etc/openstack_deploy/env.d/baremetal.yml
+new file mode 100644
+index 00000000..70f86788
+--- /dev/null
++++ b/etc/openstack_deploy/env.d/baremetal.yml
+@@ -0,0 +1,88 @@
++---
++# This file contains an example to show how to set
++# the cinder-volume service to run in a container.
++#
++# Important note:
++# When using LVM or any iSCSI-based cinder backends, such as NetApp with
++# iSCSI protocol, the cinder-volume service *must* run on metal.
++# Reference: https://bugs.launchpad.net/ubuntu/+source/lxc/+bug/1226855
++
++container_skel:
++ cinder_scheduler_container:
++ properties:
++ is_metal: true
++ cinder_api_container:
++ properties:
++ is_metal: true
++ galera_container:
++ properties:
++ is_metal: true
++ glance_container:
++ properties:
++ is_metal: true
++ heat_apis_container:
++ properties:
++ is_metal: true
++ heat_engine_container:
++ properties:
++ is_metal: true
++ horizon_container:
++ properties:
++ is_metal: true
++ ironic_api_container:
++ properties:
++ is_metal: true
++ ironic_conductor_container:
++ properties:
++ is_metal: true
++ ironic_server_container:
++ properties:
++ is_metal: true
++ ironic_compute_container:
++ properties:
++ is_metal: true
++ keystone_container:
++ properties:
++ is_metal: true
++ utility_container:
++ properties:
++ is_metal: true
++ memcached_container:
++ properties:
++ is_metal: true
++ swift_proxy_container:
++ properties:
++ is_metal: true
++ unbound_container:
++ properties:
++ is_metal: true
++ rsyslog_container:
++ properties:
++ is_metal: true
++ rabbit_mq_container:
++ properties:
++ is_metal: true
++ neutron_agents_container:
++ properties:
++ is_metal: true
++ neutron_server_container:
++ properties:
++ is_metal: true
++ nova_api_metadata_container:
++ properties:
++ is_metal: true
++ nova_api_os_compute_container:
++ properties:
++ is_metal: true
++ nova_api_placement_container:
++ properties:
++ is_metal: true
++ nova_conductor_container:
++ properties:
++ is_metal: true
++ nova_scheduler_container:
++ properties:
++ is_metal: true
++ nova_console_container:
++ properties:
++ is_metal: true
+diff --git a/etc/openstack_deploy/openstack_user_config.yml.aio_real b/etc/openstack_deploy/openstack_user_config.yml.aio_real
+new file mode 100644
+index 00000000..e4df92e6
+--- /dev/null
++++ b/etc/openstack_deploy/openstack_user_config.yml.aio_real
+@@ -0,0 +1,276 @@
++---
++cidr_networks:
++ container: 172.29.236.0/24
++ tunnel: 172.29.240.0/24
++ storage: 172.29.244.0/24
++
++used_ips:
++ - "172.29.236.1,172.29.236.50"
++ - "172.29.240.1,172.29.240.50"
++ - "172.29.244.1,172.29.244.50"
++
++global_overrides:
++ ntp_servers:
++ - "10.20.110.16"
++ # The external IP is quoted simply to ensure that the .aio file can be used as input for
++ # dynamic inventory testing.
++ internal_lb_vip_address: "172.29.236.11"
++ external_lb_vip_address: "10.37.44.239"
++ tunnel_bridge: "vlan962"
++ management_bridge: "br-ctlplane"
++
++ baremetal_images:
++ - name: "golden"
++ file: "/opt/ncio/overcloudimages/guest-image.img"
++ container_format: "bare"
++ disk_format: "raw"
++ - name: "ipa-kernel"
++ file: "/opt/ncio/overcloudimages/ironic-python-agent.kernel"
++ container_format: "aki"
++ disk_format: "aki"
++ - name: "ipa-ramdisk"
++ file: "/opt/ncio/overcloudimages/ironic-python-agent.initramfs"
++ container_format: "ari"
++ disk_format: "ari"
++
++ baremetal_flavor:
++ - name: "baremetal"
++ ram: 13999
++ vcpus: 8
++ disk: 10
++ extra_specs:
++ "cpu_arch": "x86_64"
++ "capabilities:boot_option": "local"
++
++ baremetal_networks:
++ - net_name: "provisioning_net"
++ subnet_name: "provisioning_subnet"
++ provider_network_type: "flat"
++ provider_physical_network: "flat"
++ cidr: 172.29.236.0/24
++ allocation_pool_start: 172.29.236.12
++ allocation_pool_end: 172.29.236.50
++
++ baremetal_ironic_nodes:
++ - name: controller-2
++ driver: pxe_ipmitool
++ network_interface: flat
++ properties:
++ cpu_arch: "x86_64"
++ cpus: 8
++ ram: 16384
++ disk_size: 40
++ capabilities: "boot_option:local"
++ root_device:
++ name: "/dev/sda"
++ nics:
++ - mac: "24:8A:07:8E:D3:CC"
++ driver_info:
++ power:
++ ipmi_address: 10.38.223.134
++ ipmi_username: admin
++ ipmi_password: admin
++ - name: controller-3
++ driver: pxe_ipmitool
++ network_interface: flat
++ properties:
++ cpu_arch: "x86_64"
++ cpus: 8
++ ram: 16384
++ disk_size: 40
++ capabilities: "boot_option:local"
++ root_device:
++ name: "/dev/sda"
++ nics:
++ - mac: "24:8A:07:8E:DE:54"
++ driver_info:
++ power:
++ ipmi_address: 10.38.223.133
++ ipmi_username: admin
++ ipmi_password: admin
++ - name: compute-1
++ driver: pxe_ipmitool
++ network_interface: flat
++ properties:
++ cpu_arch: "x86_64"
++ cpus: 8
++ ram: 16384
++ disk_size: 40
++ capabilities: "boot_option:local"
++ root_device:
++ name: "/dev/sda"
++ nics:
++ - mac: "24:8A:07:56:1D:28"
++ driver_info:
++ power:
++ ipmi_address: 10.38.223.132
++ ipmi_username: admin
++ ipmi_password: admin
++
++ baremetal_nova_nodes:
++ - node_name: "controller-2"
++ userdata: |
++ #cloud-config
++ users:
++ - name: ncioadmin
++ sudo: ['ALL=(ALL) NOPASSWD:ALL']
++ ssh-authorized-keys:
++ - "{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}"
++ flavor_name: "baremetal"
++ image_name: "golden"
++ networks_list:
++ - net-name: "provisioning_net"
++ v4-fixed-ip: "172.29.236.14"
++ - node_name: "controller-3"
++ userdata: |
++ #cloud-config
++ users:
++ - name: ncioadmin
++ sudo: ['ALL=(ALL) NOPASSWD:ALL']
++ ssh-authorized-keys:
++ - "{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}"
++ flavor_name: "baremetal"
++ image_name: "golden"
++ networks_list:
++ - net-name: "provisioning_net"
++ v4-fixed-ip: "172.29.236.15"
++ - node_name: "compute-1"
++ userdata: |
++ #cloud-config
++ users:
++ - name: ncioadmin
++ sudo: ['ALL=(ALL) NOPASSWD:ALL']
++ ssh-authorized-keys:
++ - "{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}"
++ flavor_name: "baremetal"
++ image_name: "golden"
++ networks_list:
++ - net-name: "provisioning_net"
++ v4-fixed-ip: "172.29.236.16"
++ swift:
++ part_power: 8
++ storage_network: 'vlan929'
++ replication_network: 'vlan929'
++ drives:
++ - name: swift1.img
++ - name: swift2.img
++ - name: swift3.img
++ mount_point: /srv
++ storage_policies:
++ - policy:
++ name: default
++ index: 0
++ default: True
++ provider_networks:
++ - network:
++ container_bridge: "br-ctlplane"
++ container_type: "veth"
++ container_interface: "br-ctlplane"
++ host_bind_override: "br-ctlplane"
++ type: "flat"
++ net_name: "flat"
++ is_container_address: true
++ is_ssh_address: true
++ group_binds:
++ - neutron_openvswitch_agent
++ - all_containers
++ - hosts
++ - network:
++ container_bridge: "vlan962"
++ container_type: "veth"
++ container_interface: "vlan962"
++ ip_from_q: "tunnel"
++ type: "vxlan"
++ range: "1:1000"
++ net_name: "vxlan"
++ group_binds:
++ - neutron_openvswitch_agent
++ - network:
++ container_bridge: "vlan929"
++ container_type: "veth"
++ container_interface: "vlan929"
++ ip_from_q: "storage"
++ type: "raw"
++ group_binds:
++ - glance_api
++ - cinder_api
++ - cinder_volume
++ - nova_compute
++ - swift_proxy
++
++# galera, memcache, rabbitmq, utility
++shared-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++log_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++haproxy_hosts:
++ controller-1:
++ ip: 172.29.236.11
++storage-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++storage_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ cinder_backends:
++ limit_container_types: cinder_volume
++ lvm:
++ volume_group: cinder-volumes
++ volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
++ volume_backend_name: LVM_iSCSI
++ iscsi_ip_address: "172.29.236.11"
++image_hosts:
++ controller-1:
++ ip: 172.29.236.11
++compute-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++orchestration_hosts:
++ controller-1:
++ ip: 172.29.236.11
++dashboard_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ironic-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++# host from where other nodes are baremetal provisioned
++baremetal-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++# Ironic compute hosts. These compute hosts will be used to
++# facilitate ironic's interactions through nova.
++ironic-compute_hosts:
++ controller-1:
++ ip: 172.29.236.11
++identity_hosts:
++ controller-1:
++ ip: 172.29.236.11
++# neutron-server, neutron-agents
++network_hosts:
++ controller-1:
++ ip: 172.29.236.11
++swift-proxy_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ swift_proxy_vars:
++ limit_container_types: swift_proxy
++ read_affinity: "r1=100"
++ write_affinity: "r1"
++ write_affinity_node_count: "1 * replicas"
++swift_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ swift_vars:
++ limit_container_types: swift
++ zone: 0
++ region: 1
+diff --git a/etc/openstack_deploy/openstack_user_config.yml.prod.example_real b/etc/openstack_deploy/openstack_user_config.yml.prod.example_real
+new file mode 100644
+index 00000000..594104b7
+--- /dev/null
++++ b/etc/openstack_deploy/openstack_user_config.yml.prod.example_real
+@@ -0,0 +1,365 @@
++---
++cidr_networks:
++ container: 172.29.236.0/24
++ tunnel: 172.29.240.0/24
++ storage: 172.29.244.0/24
++
++used_ips:
++ - "172.29.236.1,172.29.236.50"
++ - "172.29.240.1,172.29.240.50"
++ - "172.29.244.1,172.29.244.50"
++
++global_overrides:
++ galera_initial_setup: false
++ ntp_servers:
++ - "10.20.110.16"
++ internal_lb_vip_address: 172.29.236.110
++ external_lb_vip_address: 10.37.44.242
++ tunnel_bridge: "vlan962"
++ management_bridge: "br-ctlplane"
++
++ baremetal_images:
++ - name: "golden"
++ file: "/opt/ncio/overcloudimages/guest-image.img"
++ container_format: "bare"
++ disk_format: "raw"
++ - name: "ipa-kernel"
++ file: "/opt/ncio/overcloudimages/ironic-python-agent.kernel"
++ container_format: "aki"
++ disk_format: "aki"
++ - name: "ipa-ramdisk"
++ file: "/opt/ncio/overcloudimages/ironic-python-agent.initramfs"
++ container_format: "ari"
++ disk_format: "ari"
++
++ baremetal_flavor:
++ - name: "baremetal"
++ ram: 13999
++ vcpus: 8
++ disk: 10
++ extra_specs:
++ "cpu_arch": "x86_64"
++ "capabilities:boot_option": "local"
++
++ baremetal_networks:
++ - net_name: "provisioning_net"
++ subnet_name: "provisioning_subnet"
++ provider_network_type: "flat"
++ provider_physical_network: "flat"
++ cidr: 172.29.236.0/24
++ allocation_pool_start: 172.29.236.14
++ allocation_pool_end: 172.29.236.50
++
++ swift:
++ part_power: 8
++ storage_network: 'vlan929'
++ replication_network: 'vlan929'
++ drives:
++ - name: swift1.img
++ - name: swift2.img
++ - name: swift3.img
++ mount_point: /srv
++ storage_policies:
++ - policy:
++ name: default
++ index: 0
++ default: True
++ provider_networks:
++ - network:
++ container_bridge: "br-ctlplane"
++ container_type: "veth"
++ container_interface: "br-ctlplane"
++ host_bind_override: "br-ctlplane"
++ type: "flat"
++ net_name: "flat"
++ is_container_address: true
++ is_ssh_address: true
++ group_binds:
++ - neutron_linuxbridge_agent
++ - all_containers
++ - hosts
++ - network:
++ container_bridge: "vlan962"
++ container_type: "veth"
++ container_interface: "vlan962"
++ ip_from_q: "tunnel"
++ type: "vxlan"
++ range: "1:1000"
++ net_name: "vxlan"
++ group_binds:
++ - neutron_linuxbridge_agent
++ - neutron_openvswitch_agent
++ - network:
++ container_bridge: "vlan929"
++ container_type: "veth"
++ container_interface: "vlan929"
++ ip_from_q: "storage"
++ type: "raw"
++ group_binds:
++ - glance_api
++ - cinder_api
++ - cinder_volume
++ - nova_compute
++
++###
++### Infrastructure
++###
++
++baremetal-interface_config_hosts:
++ controller-2:
++ ip: 172.29.236.14
++ host_vars:
++ os_net_config:
++ network_config:
++ - type: ovs_bridge
++ name: br-ctlplane
++ use_dhcp: false
++ dns_servers:
++ - "172.29.236.11"
++ - "10.102.12.68"
++ addresses:
++ - ip_netmask: 172.29.236.14/24
++ members:
++ - type: interface
++ primary: true
++ name: ens1f0
++ ovs_extra:
++ - "br-set-external-id br-ctlplane bridge-id br-ctlplane"
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 929
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.244.14/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 962
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.240.14/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 900
++ device: ens1f1
++ dns_servers:
++ - "10.102.12.68"
++ routes:
++ - ip_netmask: "0.0.0.0/0"
++ next_hop: "10.37.44.225"
++ addresses:
++ - ip_netmask: 10.37.44.240/27
++ controller-3:
++ ip: 172.29.236.15
++ host_vars:
++ os_net_config:
++ network_config:
++ - type: ovs_bridge
++ name: br-ctlplane
++ use_dhcp: false
++ dns_servers:
++ - "172.29.236.11"
++ - "10.102.12.68"
++ addresses:
++ - ip_netmask: 172.29.236.15/24
++ members:
++ - type: interface
++ primary: true
++ name: ens1f0
++ ovs_extra:
++ - "br-set-external-id br-ctlplane bridge-id br-ctlplane"
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 929
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.244.15/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 962
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.240.15/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 900
++ device: ens1f1
++ dns_servers:
++ - "10.102.12.68"
++ routes:
++ - ip_netmask: "0.0.0.0/0"
++ next_hop: "10.37.44.225"
++ addresses:
++ - ip_netmask: 10.37.44.241/27
++ compute-1:
++ ip: 172.29.236.16
++ host_vars:
++ os_net_config:
++ network_config:
++ - type: ovs_bridge
++ name: br-ctlplane
++ use_dhcp: false
++ dns_servers:
++ - "172.29.236.11"
++ addresses:
++ - ip_netmask: 172.29.236.16/24
++ members:
++ - type: interface
++ primary: true
++ name: ens1f0
++ ovs_extra:
++ - "br-set-external-id br-ctlplane bridge-id br-ctlplane"
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 929
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.244.16/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 962
++ device: ens1f0
++ addresses:
++ - ip_netmask: 172.29.240.16/24
++
++# galera, memcache, rabbitmq, utility
++shared-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# load balancer
++# Ideally the load balancer should not use the Infrastructure hosts.
++# Dedicated hardware is best for improved performance and security.
++haproxy_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++###
++### OpenStack
++###
++
++# keystone
++identity_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# cinder api services
++storage-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++storage_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ cinder_backends:
++ limit_container_types: cinder_volume
++ lvm:
++ volume_group: cinder-volumes
++ volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
++ volume_backend_name: LVM_iSCSI
++ iscsi_ip_address: "172.29.236.11"
++
++# nova api, conductor, etc services
++compute-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# heat
++orchestration_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# horizon
++dashboard_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# neutron server, agents (L3, etc)
++network_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# glance
++# As swift is deployed in this env, Glance uses swift as default backend
++image_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# nova hypervisors
++compute_hosts:
++ compute-1:
++ ip: 172.29.236.16
++
++ironic-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ controller-2:
++ ip: 172.29.236.14
++ controller-3:
++ ip: 172.29.236.15
++
++# Ironic compute hosts. These compute hosts will be used to
++# facilitate ironic's interactions through nova. At the moment
++# there are issues if we have multiple computes.
++ironic-compute_hosts:
++ controller-1:
++ ip: 172.29.236.11
++
++# Swift
++# swift proxy
++swift-proxy_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ swift_proxy_vars:
++ limit_container_types: swift_proxy
++ read_affinity: "r1=100"
++ write_affinity: "r1"
++ write_affinity_node_count: "1 * replicas"
++
++# swift hosts
++swift_hosts:
++ controller-1:
++ ip: 172.29.236.11
++ container_vars:
++ swift_vars:
++ limit_container_types: swift
++ zone: 0
++ region: 1
++
++# host from where other nodes are baremetal provisioned
++baremetal-infra_hosts:
++ controller-1:
++ ip: 172.29.236.11
+diff --git a/etc/openstack_deploy/user_variables.yml b/etc/openstack_deploy/user_variables.yml
+index d024710d..9d2882fa 100644
+--- a/etc/openstack_deploy/user_variables.yml
++++ b/etc/openstack_deploy/user_variables.yml
+@@ -17,10 +17,94 @@
+ ### This file contains commonly used overrides for convenience. Please inspect
+ ### the defaults for each role to find additional override options.
+ ###
++openstack_service_publicuri_proto: "https"
++openstack_host_specific_kernel_modules:
++ - { name: "ebtables", pattern: "CONFIG_BRIDGE_NF_EBTABLES", group: "network_hosts" }
++ - { name: "openvswitch", pattern: "CONFIG_OPENVSWITCH=", group: "network_hosts" }
++
++galera_wsrep_sst_method: rsync
++
++keystone_bin: "/usr/bin"
++keystone_bind_address: "{{ ansible_host }}"
++keystone_wsgi_processes: 1
++keystone_wsgi_threads: 1
++
++glance_bin: "/usr/bin"
++glance_etc_dir: "/etc/glance"
++glance_api_bind_address: "{{ ansible_host }}"
++glance_registry_bind_address: "{{ ansible_host }}"
++glance_wsgi_processes: 1
++
++cinder_bin: "/usr/bin"
++cinder_wsgi_processes: 1
++
++nova_bin: "/usr/bin"
++nova_wsgi_processes: 1
++nova_virt_types:
++ ironic:
++ nova_compute_driver: ironic.IronicDriver
++ nova_scheduler_host_manager: ironic_host_manager
++ nova_reserved_host_memory_mb: 0
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: False
++ kvm:
++ nova_compute_driver: libvirt.LibvirtDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ lxd:
++ nova_compute_driver: lxd.LXDDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_compute_manager: nova.compute.manager.ComputeManager
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ qemu:
++ nova_compute_driver: libvirt.LibvirtDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ powervm:
++ nova_compute_driver: powervm.driver.PowerVMDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 8192
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++
++
++neutron_bin: "/usr/bin"
++neutron_bind_address: "{{ ansible_host }}"
++neutron_plugin_type: ml2.ovs
++
++heat_bin: "/usr/bin"
++heat_wsgi_processes: 1
++heat_bind_host: "{{ ansible_host }}"
++
++horizon_bin: "/usr/bin"
++horizon_lib_dir: "{{ horizon_bin | dirname }}/lib/python2.7/site-packages"
++horizon_openstack_dashboard_path: "/usr/share/openstack-dashboard"
++horizon_enable_ironic_ui: False
++
++swift_bin: "/usr/bin"
++swift_proxy_host: "{{ ansible_host }}"
++
++ironic_bin: "/usr/bin"
++ironic_service_host: "{{ ansible_host }}"
++ironic_service_names:
++ - "{{ ironic_conductor_program_name }}"
++ - "httpd"
++ironic_standalone: True
+
+ ## Debug and Verbose options.
+ debug: false
+-
++galera_ignore_cluster_state: true
+ ## Common Glance Overrides
+ # Set glance_default_store to "swift" if using Cloud Files backend
+ # or "rbd" if using ceph backend; the latter will trigger ceph to get
+@@ -40,7 +124,7 @@ debug: false
+ # When nova_libvirt_images_rbd_pool is defined, ceph will be installed on nova
+ # hosts.
+ # nova_libvirt_images_rbd_pool: vms
+-
++nova_console_type: novnc
+ # If you wish to change the dhcp_domain configured for both nova and neutron
+ # dhcp_domain: openstacklocal
+
+@@ -90,7 +174,8 @@ debug: false
+ # set the rabbitmq_use_ssl variable to 'false'. The default setting of 'true'
+ # is highly recommended for securing the contents of RabbitMQ messages.
+ # rabbitmq_use_ssl: false
+-
++rabbitmq_use_ssl: false
++haproxy_ssl: true
+ # RabbitMQ management plugin is enabled by default, the guest user has been
+ # removed for security reasons and a new userid 'monitoring' has been created
+ # with the 'monitoring' user tag. In order to modify the userid, uncomment the
+@@ -150,7 +235,7 @@ debug: false
+ # group_vars/all/keepalived.yml) in your user space if necessary.
+ #
+ # Uncomment this to disable keepalived installation (cf. documentation)
+-# haproxy_use_keepalived: False
++haproxy_use_keepalived: False
+ #
+ # HAProxy Keepalived configuration (cf. documentation)
+ # Make sure that this is set correctly according to the CIDR used for your
+@@ -173,3 +258,4 @@ debug: false
+
+ # Keepalived default IP address used to check its alive status (IPv4 only)
+ # keepalived_ping_address: "193.0.14.129"
++
+diff --git a/etc/openstack_deploy/user_variables.yml_real b/etc/openstack_deploy/user_variables.yml_real
+new file mode 100644
+index 00000000..40172b8c
+--- /dev/null
++++ b/etc/openstack_deploy/user_variables.yml_real
+@@ -0,0 +1,256 @@
++---
++# Copyright 2014, Rackspace US, Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++###
++### This file contains commonly used overrides for convenience. Please inspect
++### the defaults for each role to find additional override options.
++###
++
++openstack_host_specific_kernel_modules:
++ - { name: "ebtables", pattern: "CONFIG_BRIDGE_NF_EBTABLES", group: "network_hosts" }
++ - { name: "openvswitch", pattern: "CONFIG_OPENVSWITCH=", group: "network_hosts" }
++
++openrc_file_dest: "{{ ansible_env.PWD }}/openrc"
++openrc_file_owner: "{{ ansible_env.SUDO_USER }}"
++openrc_file_group: "{{ ansible_env.SUDO_USER }}"
++### neutron specific config
++neutron_ml2_drivers_type: "vxlan"
++galera_wsrep_sst_method: rsync
++
++# By default the rabbitmq_management plugin is enabled in the role. Override it with an empty list.
++rabbitmq_plugins: []
++
++keystone_bin: "/usr/bin"
++keystone_bind_address: "{{ ansible_host }}"
++keystone_wsgi_processes: 1
++keystone_wsgi_threads: 48
++
++glance_bin: "/usr/bin"
++glance_api_bind_address: "{{ ansible_host }}"
++glance_registry_bind_address: "{{ ansible_host }}"
++
++cinder_bin: "/usr/bin"
++
++nova_bin: "/usr/bin"
++nova_scheduler_default_filters: "RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,AggregateCoreFilter,AggregateDiskFilter"
++nova_virt_types:
++ ironic:
++ nova_compute_driver: ironic.IronicDriver
++ nova_scheduler_host_manager: ironic_host_manager
++ nova_reserved_host_memory_mb: 0
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: False
++ kvm:
++ nova_compute_driver: libvirt.LibvirtDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ lxd:
++ nova_compute_driver: lxd.LXDDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_compute_manager: nova.compute.manager.ComputeManager
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ qemu:
++ nova_compute_driver: libvirt.LibvirtDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 2048
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++ powervm:
++ nova_compute_driver: powervm.driver.PowerVMDriver
++ nova_scheduler_host_manager: host_manager
++ nova_reserved_host_memory_mb: 8192
++ nova_firewall_driver: nova.virt.firewall.NoopFirewallDriver
++ nova_scheduler_use_baremetal_filters: False
++ nova_scheduler_tracks_instance_changes: True
++
++
++neutron_bin: "/usr/bin"
++neutron_bind_address: "{{ ansible_host }}"
++
++heat_bin: "/usr/bin"
++heat_bind_host: "{{ ansible_host }}"
++
++horizon_bin: "/usr/bin"
++horizon_lib_dir: "{{ horizon_bin | dirname }}/lib/python2.7/site-packages"
++horizon_openstack_dashboard_path: "/usr/share/openstack-dashboard"
++
++swift_bin: "/usr/bin"
++swift_proxy_host: "{{ ansible_host }}"
++
++ironic_bin: "/usr/bin"
++ironic_service_host: "{{ ansible_host }}"
++ironic_service_names:
++ - "{{ ironic_conductor_program_name }}"
++ - "httpd"
++ironic_openstack_driver_list:
++ - agent_ipmitool
++ - pxe_ipmitool
++ - pxe_ssh
++
++## Debug and Verbose options.
++debug: true
++galera_ignore_cluster_state: true
++## Common Glance Overrides
++# Set glance_default_store to "swift" if using Cloud Files backend
++# or "rbd" if using ceph backend; the latter will trigger ceph to get
++# installed on glance. If using a file store, a shared file store is
++# recommended. See the OpenStack-Ansible install guide and the OpenStack
++# documentation for more details.
++# Note that "swift" is automatically set as the default back-end if there
++# are any swift hosts in the environment. Use this setting to override
++# this automation if you wish for a different default back-end.
++# glance_default_store: file
++
++## Ceph pool name for Glance to use
++# glance_rbd_store_pool: images
++# glance_rbd_store_chunk_size: 8
++
++## Common Nova Overrides
++# When nova_libvirt_images_rbd_pool is defined, ceph will be installed on nova
++# hosts.
++# nova_libvirt_images_rbd_pool: vms
++nova_console_type: novnc
++# If you wish to change the dhcp_domain configured for both nova and neutron
++# dhcp_domain: openstacklocal
++
++## Common Glance Overrides when using a Swift back-end
++# By default when 'glance_default_store' is set to 'swift' the playbooks will
++# expect to use the Swift back-end that is configured in the same inventory.
++# If the Swift back-end is not in the same inventory (ie it is already setup
++# through some other means) then these settings should be used.
++#
++# NOTE: Ensure that the auth version matches your authentication endpoint.
++#
++# NOTE: If the password for glance_swift_store_key contains a dollar sign ($),
++# it must be escaped with an additional dollar sign ($$), not a backslash. For
++# example, a password of "super$ecure" would need to be entered as
++# "super$$ecure" below. See Launchpad Bug #1259729 for more details.
++#
++# glance_swift_store_auth_version: 3
++# glance_swift_store_auth_address: "https://some.auth.url.com"
++# glance_swift_store_user: "OPENSTACK_TENANT_ID:OPENSTACK_USER_NAME"
++# glance_swift_store_key: "OPENSTACK_USER_PASSWORD"
++# glance_swift_store_container: "NAME_OF_SWIFT_CONTAINER"
++# glance_swift_store_region: "NAME_OF_REGION"
++
++## Common Ceph Overrides
++# ceph_mons:
++# - 10.16.5.40
++# - 10.16.5.41
++# - 10.16.5.42
++
++## Custom Ceph Configuration File (ceph.conf)
++# By default, your deployment host will connect to one of the mons defined above to
++# obtain a copy of your cluster's ceph.conf. If you prefer, uncomment ceph_conf_file
++# and customise to avoid ceph.conf being copied from a mon.
++#ceph_conf_file: |
++# [global]
++# fsid = 00000000-1111-2222-3333-444444444444
++# mon_initial_members = mon1.example.local,mon2.example.local,mon3.example.local
++# mon_host = 10.16.5.40,10.16.5.41,10.16.5.42
++# # optionally, you can use this construct to avoid defining this list twice:
++# # mon_host = {{ ceph_mons|join(',') }}
++# auth_cluster_required = cephx
++# auth_service_required = cephx
++
++
++# By default, openstack-ansible configures all OpenStack services to talk to
++# RabbitMQ over encrypted connections on port 5671. To opt-out of this default,
++# set the rabbitmq_use_ssl variable to 'false'. The default setting of 'true'
++# is highly recommended for securing the contents of RabbitMQ messages.
++# rabbitmq_use_ssl: false
++rabbitmq_use_ssl: false
++haproxy_ssl: false
++# RabbitMQ management plugin is enabled by default, the guest user has been
++# removed for security reasons and a new userid 'monitoring' has been created
++# with the 'monitoring' user tag. In order to modify the userid, uncomment the
++# following and change 'monitoring' to your userid of choice.
++# rabbitmq_monitoring_userid: monitoring
++
++
++## Additional pinning generator that will allow for more packages to be pinned as you see fit.
++## All pins allow for package and versions to be defined. Be careful using this as versions
++## are always subject to change and updates regarding security will become your problem from this
++## point on. Pinning can be done based on a package version, release, or origin. Use "*" in the
++## package name to indicate that you want to pin all package to a particular constraint.
++# apt_pinned_packages:
++# - { package: "lxc", version: "1.0.7-0ubuntu0.1" }
++# - { package: "libvirt-bin", version: "1.2.2-0ubuntu13.1.9" }
++# - { package: "rabbitmq-server", origin: "www.rabbitmq.com" }
++# - { package: "*", release: "MariaDB" }
++
++
++## Environment variable settings
++# This allows users to specify the additional environment variables to be set
++# which is useful in setting where you working behind a proxy. If working behind
++# a proxy It's important to always specify the scheme as "http://". This is what
++# the underlying python libraries will handle best. This proxy information will be
++# placed both on the hosts and inside the containers.
++
++## Example environment variable setup:
++# proxy_env_url: http://username:pa$$w0rd@10.10.10.9:9000/
++# no_proxy_env: "localhost,127.0.0.1,{{ internal_lb_vip_address }},{{ external_lb_vip_address }},{% for host in groups['all_containers'] %}{{ hostvars[host]['container_address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
++# global_environment_variables:
++# HTTP_PROXY: "{{ proxy_env_url }}"
++# HTTPS_PROXY: "{{ proxy_env_url }}"
++# NO_PROXY: "{{ no_proxy_env }}"
++# http_proxy: "{{ proxy_env_url }}"
++# https_proxy: "{{ proxy_env_url }}"
++# no_proxy: "{{ no_proxy_env }}"
++
++
++## SSH connection wait time
++# If an increased delay for the ssh connection check is desired,
++# uncomment this variable and set it appropriately.
++#ssh_delay: 5
++
++
++## HAProxy
++# Uncomment this to disable keepalived installation (cf. documentation)
++# haproxy_use_keepalived: False
++#
++# HAProxy Keepalived configuration (cf. documentation)
++# Make sure that this is set correctly according to the CIDR used for your
++# internal and external addresses.
++haproxy_keepalived_external_vip_cidr: "{{external_lb_vip_address}}/32"
++haproxy_keepalived_internal_vip_cidr: "{{internal_lb_vip_address}}/32"
++haproxy_keepalived_external_interface: "vlan900"
++haproxy_keepalived_internal_interface: "br-ctlplane"
++
++# Defines the default VRRP id used for keepalived with haproxy.
++# Overwrite it to your value to make sure you don't overlap
++# with existing VRRPs id on your network. Default is 10 for the external and 11 for the
++# internal VRRPs
++# haproxy_keepalived_external_virtual_router_id:
++# haproxy_keepalived_internal_virtual_router_id:
++
++# Defines the VRRP master/backup priority. Defaults respectively to 100 and 20
++# haproxy_keepalived_priority_master:
++# haproxy_keepalived_priority_backup:
++
++# Keepalived default IP address used to check its alive status (IPv4 only)
++keepalived_ping_address: "10.37.44.225"
++
++# All the previous variables are used in a var file, fed to the keepalived role.
++# To use another file to feed the role, override the following var:
++# haproxy_keepalived_vars_file: 'vars/configs/keepalived_haproxy.yml'
+diff --git a/inventory/dynamic_inventory.py b/inventory/dynamic_inventory.py
+deleted file mode 100755
+index d50940d5..00000000
+--- a/inventory/dynamic_inventory.py
++++ /dev/null
+@@ -1,81 +0,0 @@
+-#!/opt/ansible-runtime/bin/python
+-# Copyright 2014, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-#
+-# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+-
+-import argparse
+-import os
+-import sys
+-
+-try:
+- from osa_toolkit import generate
+-except ImportError:
+- current_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
+- lib_path = os.path.join(current_path, '..')
+- sys.path.append(lib_path)
+- from osa_toolkit import generate
+-
+-
+-# Function kept in order to use relative pathing for the env.d directory
+-def args(arg_list):
+- """Setup argument Parsing."""
+- parser = argparse.ArgumentParser(
+- usage='%(prog)s',
+- description='OpenStack Inventory Generator',
+- epilog='Inventory Generator Licensed "Apache 2.0"')
+-
+- parser.add_argument(
+- '--config',
+- help='Path containing the user defined configuration files',
+- required=False,
+- default=None
+- )
+- parser.add_argument(
+- '--list',
+- help='List all entries',
+- action='store_true'
+- )
+-
+- parser.add_argument(
+- '--check',
+- help="Configuration check only, don't generate inventory",
+- action='store_true',
+- )
+-
+- parser.add_argument(
+- '-d',
+- '--debug',
+- help=('Output debug messages to log file. '
+- 'File is appended to, not overwritten'),
+- action='store_true',
+- default=False,
+- )
+-
+- parser.add_argument(
+- '-e',
+- '--environment',
+- help=('Directory that contains the base env.d directory.\n'
+- 'Defaults to <OSA_ROOT>/inventory/.'),
+- required=False,
+- default=os.path.dirname(__file__),
+- )
+-
+- return vars(parser.parse_args(arg_list))
+-
+-
+-if __name__ == '__main__':
+- all_args = args(sys.argv[1:])
+- output = generate.main(**all_args)
+- print(output)
+diff --git a/inventory/group_vars/all/glance.yml b/inventory/group_vars/all/glance.yml
+index bd39c8fb..b8766a24 100644
+--- a/inventory/group_vars/all/glance.yml
++++ b/inventory/group_vars/all/glance.yml
+@@ -18,7 +18,7 @@ glance_service_proto: http
+ glance_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(glance_service_proto) }}"
+ glance_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(glance_service_proto) }}"
+ glance_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(glance_service_proto) }}"
+-glance_service_publicuri: "{{ glance_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ glance_service_port }}"
++glance_service_publicuri: "{{ glance_service_publicuri_proto }}://{{ external_lb_vip_address | ipwrap }}:{{ glance_service_port }}"
+ glance_service_publicurl: "{{ glance_service_publicuri }}"
+ glance_service_internaluri: "{{ glance_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
+ glance_service_internalurl: "{{ glance_service_internaluri }}"
+diff --git a/inventory/group_vars/all/keystone.yml b/inventory/group_vars/all/keystone.yml
+index bbd1238b..e515e563 100644
+--- a/inventory/group_vars/all/keystone.yml
++++ b/inventory/group_vars/all/keystone.yml
+@@ -50,5 +50,5 @@ keystone_service_publicuri_insecure: |-
+ {% set _insecure = not (keystone_user_ssl_cert is defined or haproxy_user_ssl_cert is defined) %}
+ {% endif %}
+ {{ _insecure }}
+-keystone_service_publicuri: "{{ keystone_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ keystone_service_port }}"
++keystone_service_publicuri: "{{ keystone_service_publicuri_proto }}://{{ external_lb_vip_address | ipwrap }}:{{ keystone_service_port }}"
+ keystone_service_publicurl: "{{ keystone_service_publicuri }}/v3"
+diff --git a/inventory/group_vars/all/ssl.yml b/inventory/group_vars/all/ssl.yml
+index 9ff527b1..241fe8aa 100644
+--- a/inventory/group_vars/all/ssl.yml
++++ b/inventory/group_vars/all/ssl.yml
+@@ -18,4 +18,4 @@
+ # services running behind Apache (currently, Horizon and Keystone).
+ ssl_protocol: "ALL -SSLv2 -SSLv3"
+ # Cipher suite string from https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+-ssl_cipher_suite: "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS"
++ssl_cipher_suite: "DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DH-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:RSA+AESGCM:!LOW:!aNULL:!eNULL:!MD5:!DSS"
+diff --git a/inventory/group_vars/aodh_all.yml b/inventory/group_vars/aodh_all.yml
+index 9ce0ecd3..8e693782 100644
+--- a/inventory/group_vars/aodh_all.yml
++++ b/inventory/group_vars/aodh_all.yml
+@@ -21,7 +21,7 @@ aodh_connection_string: "mysql+pymysql://{{ aodh_galera_user }}:{{ aodh_containe
+ aodh_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
+
+ aodh_service_in_ldap: "{{ service_ldap_backend_enabled }}"
+-aodh_service_publicuri: "{{ openstack_service_publicuri_proto|default(aodh_service_proto) }}://{{ external_lb_vip_address }}:{{ aodh_service_port }}"
++aodh_service_publicuri: "{{ openstack_service_publicuri_proto|default(aodh_service_proto) }}://{{ external_lb_vip_address | ipwrap }}:{{ aodh_service_port }}"
+
+ # Ensure that the package state matches the global setting
+ aodh_package_state: "{{ package_state }}"
+diff --git a/inventory/group_vars/cinder_all.yml b/inventory/group_vars/cinder_all.yml
+index 5a79789a..7fe14155 100644
+--- a/inventory/group_vars/cinder_all.yml
++++ b/inventory/group_vars/cinder_all.yml
+@@ -55,7 +55,7 @@ cinder_glance_service_port: "{{ glance_service_port }}"
+ # If there are Swift hosts in the environment, then use it as the default Glance store
+ # This is specifically duplicated from glance_all for the cinder_glance_api_version
+ # setting below.
+-glance_default_store: "{{ ((groups['swift_all'] is defined) and (groups['swift_all'] | length > 0)) | ternary('swift', 'file') }}"
++#glance_default_store: "{{ ((groups['swift_all'] is defined) and (groups['swift_all'] | length > 0)) | ternary('swift', 'file') }}"
+
+ # cinder_backend_lvm_inuse: True if current host has an lvm backend
+ cinder_backend_lvm_inuse: '{{ (cinder_backends|default("")|to_json).find("cinder.volume.drivers.lvm.LVMVolumeDriver") != -1 }}'
+diff --git a/inventory/group_vars/galera_all.yml b/inventory/group_vars/galera_all.yml
+index e70a42d9..07348f63 100644
+--- a/inventory/group_vars/galera_all.yml
++++ b/inventory/group_vars/galera_all.yml
+@@ -30,7 +30,7 @@ galera_disable_privatedevices: "{{ ((properties.is_metal | default(false)) | boo
+ # By default galera_monitoring xinetd app is open to 0.0.0.0/0
+ # This makes sure the monitoring is only restricted to the necessary nodes:
+ # the load balancers, and the galera nodes.
+-galera_monitoring_allowed_source: "{% for node in groups['galera_all'] + groups['haproxy_all'] %}{{ hostvars[node]['ansible_host'] }} {% endfor %} 127.0.0.1"
++galera_monitoring_allowed_source: "{% if groups['haproxy'] | default([]) | length > 0 %} {% for node in groups['galera_all'] + groups['haproxy_all'] %}{{ hostvars[node]['ansible_host'] }} {% endfor %} 127.0.0.1 {% else %} {% for node in groups['galera_all'] %}{{ hostvars[node]['ansible_host'] }} {% endfor %} 127.0.0.1 {% endif %}"
+
+ # Galera sessions are long lived, so if we do endpoint maintenance we will
+ # force kill the sessions to force a failover to the active endpoint.
+diff --git a/inventory/group_vars/haproxy/haproxy.yml b/inventory/group_vars/haproxy/haproxy.yml
+index 7f9fac88..d881be9f 100644
+--- a/inventory/group_vars/haproxy/haproxy.yml
++++ b/inventory/group_vars/haproxy/haproxy.yml
+@@ -22,11 +22,6 @@ keepalived_selinux_compile_rules:
+ # Ensure that the package state matches the global setting
+ haproxy_package_state: "{{ package_state }}"
+
+-haproxy_whitelist_networks:
+- - 192.168.0.0/16
+- - 172.16.0.0/12
+- - 10.0.0.0/8
+-
+ haproxy_galera_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+ haproxy_glance_registry_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+ haproxy_keystone_admin_whitelist_networks: "{{ haproxy_whitelist_networks }}"
+@@ -53,36 +48,6 @@ haproxy_default_services:
+ - "httpchk HEAD / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+ haproxy_whitelist_networks: "{{ haproxy_galera_whitelist_networks }}"
+ haproxy_service_enabled: "{{ groups['galera_all'] is defined and groups['galera_all'] | length > 0 }}"
+- - service:
+- haproxy_service_name: repo_git
+- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
+- haproxy_bind: "{{ [internal_lb_vip_address] }}"
+- haproxy_port: 9418
+- haproxy_balance_type: tcp
+- haproxy_backend_options:
+- - tcp-check
+- haproxy_whitelist_networks: "{{ haproxy_repo_git_whitelist_networks }}"
+- haproxy_service_enabled: "{{ groups['repo_all'] is defined and groups['repo_all'] | length > 0 }}"
+- - service:
+- haproxy_service_name: repo_all
+- haproxy_backend_nodes: "{{ groups['repo_all'] | default([]) }}"
+- haproxy_bind: "{{ [internal_lb_vip_address] }}"
+- haproxy_port: 8181
+- haproxy_balance_type: http
+- haproxy_backend_options:
+- - "httpchk HEAD / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+- haproxy_service_enabled: "{{ groups['repo_all'] is defined and groups['repo_all'] | length > 0 }}"
+- - service:
+- haproxy_service_name: repo_cache
+- haproxy_backend_nodes: "{{ (groups['repo_all'] | default([]))[:1] }}" # list expected
+- haproxy_backup_nodes: "{{ (groups['repo_all'] | default([]))[1:] }}"
+- haproxy_bind: "{{ [internal_lb_vip_address] }}"
+- haproxy_port: "{{ repo_pkg_cache_port }}"
+- haproxy_balance_type: http
+- haproxy_backend_options:
+- - "httpchk HEAD /acng-report.html HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+- haproxy_whitelist_networks: "{{ haproxy_repo_cache_whitelist_networks }}"
+- haproxy_service_enabled: "{{ groups['repo_all'] is defined and groups['repo_all'] | length > 0 }}"
+ - service:
+ haproxy_service_name: glance_api
+ haproxy_backend_nodes: "{{ groups['glance_api'] | default([]) }}"
+@@ -187,9 +152,9 @@ haproxy_default_services:
+ haproxy_port: 8780
+ haproxy_balance_type: http
+ haproxy_backend_options:
+- - "httpchk GET / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
++ - "httpchk GET /placement HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+ haproxy_backend_httpcheck_options:
+- - "expect status 200"
++ - "expect status 401"
+ haproxy_whitelist_networks: "{{ haproxy_nova_placement_whitelist_networks }}"
+ haproxy_service_enabled: "{{ groups['nova_api_placement'] is defined and groups['nova_api_placement'] | length > 0 }}"
+ - service:
+@@ -197,14 +162,12 @@ haproxy_default_services:
+ haproxy_backend_nodes: "{{ groups['nova_console'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+ haproxy_port: "{{ hostvars[(groups['nova_console'] | default(['localhost']))[0] | default('localhost')]['nova_console_port'] | default(6082) }}"
+- haproxy_balance_type: http
++ haproxy_balance_type: tcp
+ haproxy_timeout_client: 60m
+ haproxy_timeout_server: 60m
+ haproxy_balance_alg: source
+ haproxy_backend_options:
+- - "httpchk HEAD / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+- haproxy_backend_httpcheck_options:
+- - "expect status 404"
++ - tcp-check
+ haproxy_service_enabled: "{{ groups['nova_console'] is defined and groups['nova_console'] | length > 0 }}"
+ - service:
+ haproxy_service_name: cinder_api
+@@ -219,7 +182,7 @@ haproxy_default_services:
+ haproxy_service_name: horizon
+ haproxy_backend_nodes: "{{ groups['horizon_all'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+- haproxy_ssl_all_vips: true
++ haproxy_ssl_all_vips: false
+ haproxy_port: "{{ haproxy_ssl | ternary(443,80) }}"
+ haproxy_backend_port: 80
+ haproxy_redirect_http_port: 80
+@@ -270,7 +233,8 @@ haproxy_default_services:
+ haproxy_service_name: rabbitmq_mgmt
+ haproxy_backend_nodes: "{{ groups['rabbitmq'] | default([]) }}"
+ haproxy_ssl: "{{ haproxy_ssl }}"
+- haproxy_port: 15672
++ haproxy_bind: "{{ [internal_lb_vip_address] }}"
++ haproxy_port: 5672
+ haproxy_balance_type: http
+ haproxy_backend_options:
+ - "httpchk HEAD / HTTP/1.0\\r\\nUser-agent:\\ osa-haproxy-healthcheck"
+diff --git a/inventory/group_vars/haproxy/keepalived.yml b/inventory/group_vars/haproxy/keepalived.yml
+index 977f8ee3..fdf2b70d 100644
+--- a/inventory/group_vars/haproxy/keepalived.yml
++++ b/inventory/group_vars/haproxy/keepalived.yml
+@@ -13,7 +13,6 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-keepalived_ping_address: "193.0.14.129"
+ keepalived_ping_count: 1
+ keepalived_ping_interval: 10
+ keepalived_ubuntu_src: "native"
+@@ -36,7 +35,7 @@ keepalived_scripts:
+ haproxy_check_script:
+ check_script: "/bin/kill -0 `cat /var/run/haproxy.pid`"
+ pingable_check_script:
+- check_script: "/bin/ping -c {{ keepalived_ping_count }} {{ keepalived_ping_address }} 1>&2"
++ check_script: "/bin/ping -c {{ keepalived_ping_count }} {{ keepalived_ping_address | default('193.0.14.129') }} 1>&2"
+ interval: "{{ keepalived_ping_interval }}"
+ fall: 2
+ rise: 4
+diff --git a/osa_toolkit/manage.py b/osa_toolkit/manage.py
+index 5237dfae..9f4322d9 100644
+--- a/osa_toolkit/manage.py
++++ b/osa_toolkit/manage.py
+@@ -1,4 +1,4 @@
+-#!/opt/ansible-runtime/bin/python
++#!/usr/bin/env python
+ #
+ # Copyright 2014, Rackspace US, Inc.
+ #
+diff --git a/playbooks/ansible-conf-update.yml b/playbooks/ansible-conf-update.yml
+new file mode 100644
+index 00000000..213abee7
+--- /dev/null
++++ b/playbooks/ansible-conf-update.yml
+@@ -0,0 +1,15 @@
++- name: Ansible config update
++ hosts: [ management ]
++ gather_facts: True
++ become: yes
++ become_method: sudo
++ become_user: root
++ tasks:
++ - name: Change ansible configuration
++ ini_file:
++ path: "/etc/ansible/ansible.cfg"
++ section: "{{item.section}}"
++ option: "{{item.option}}"
++ value: "{{item.value}}"
++ with_items:
++ - { section: ssh_connection, option: "control_path", value: "%(directory)s/%%h" }
+diff --git a/tests/test-inventory.ini b/playbooks/bootstrap-inventory.ini
+similarity index 100%
+rename from tests/test-inventory.ini
+rename to playbooks/bootstrap-inventory.ini
+diff --git a/playbooks/bootstrap-kvm-contorller-1-vars.yml b/playbooks/bootstrap-kvm-contorller-1-vars.yml
+new file mode 100644
+index 00000000..73b7b976
+--- /dev/null
++++ b/playbooks/bootstrap-kvm-contorller-1-vars.yml
+@@ -0,0 +1,40 @@
++aio_hostname: "controller-1"
++bootstrap_host_loopback_cinder_size: 8
++bootstrap_host_loopback_swift_size: 8
++bootstrap_host_data_disk_min_size: 10
++bootstrap_env_file: "{{ bootstrap_host_aio_config_path }}/env.d/baremetal.yml"
++sudo_user: "ncioadmin"
++# password: ncioadmin
++# Generated with command "openssl passwd -1 -salt ncioadmin ncioadmin"
++sudo_user_password: "$1$ncioadmi$vDa1mNhcMDKMBSI27RqW51"
++power_contorl_ssh_priv_key: "/tmp/id_rsa"
++os_net_config:
++ network_config:
++ - type: ovs_bridge
++ name: br-ctlplane
++ use_dhcp: false
++ dns_servers:
++ - "10.102.12.68"
++ addresses:
++ - ip_netmask: 172.29.236.11/24
++ members:
++ - type: interface
++ primary: true
++ name: eth0
++ ovs_extra:
++ - "br-set-external-id br-ctlplane bridge-id br-ctlplane"
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 10
++ device: eth0
++ addresses:
++ - ip_netmask: 172.29.244.11/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 20
++ device: eth0
++ addresses:
++ - ip_netmask: 172.29.240.11/24
++ - type: interface
++ use_dhcp: true
++ name: eth1
+diff --git a/playbooks/bootstrap-physical-contorller-1-vars.yml b/playbooks/bootstrap-physical-contorller-1-vars.yml
+new file mode 100644
+index 00000000..f16defb0
+--- /dev/null
++++ b/playbooks/bootstrap-physical-contorller-1-vars.yml
+@@ -0,0 +1,47 @@
++aio_hostname: "controller-1"
++bootstrap_host_loopback_cinder_size: 8
++bootstrap_host_loopback_swift_size: 8
++bootstrap_host_data_disk_min_size: 10
++bootstrap_env_file: "{{ bootstrap_host_aio_config_path }}/env.d/baremetal.yml"
++sudo_user: "ncioadmin"
++# password: ncioadmin
++# Generated with command "openssl passwd -1 -salt ncioadmin ncioadmin"
++sudo_user_password: "$1$ncioadmi$vDa1mNhcMDKMBSI27RqW51"
++bootstrap_host_aio_config_name: "openstack_user_config.yml.aio_real"
++bootstrap_host_user_variables_filename: "user_variables.yml_real"
++os_net_config:
++ network_config:
++ - type: ovs_bridge
++ name: br-ctlplane
++ use_dhcp: false
++ addresses:
++ - ip_netmask: 172.29.236.11/24
++ members:
++ - type: interface
++ primary: true
++ name: ens255f0
++ ovs_extra:
++ - "br-set-external-id br-ctlplane bridge-id br-ctlplane"
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 978
++ device: ens4f0
++ addresses:
++ - ip_netmask: 172.29.244.11/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 1002
++ device: ens4f0
++ addresses:
++ - ip_netmask: 172.29.240.11/24
++ - type: vlan
++ use_dhcp: false
++ vlan_id: 902
++ device: ens4f0
++ dns_servers:
++ - "10.39.12.252"
++ routes:
++ - ip_netmask: "0.0.0.0/0"
++ next_hop: "10.39.172.254"
++ addresses:
++ - ip_netmask: 10.39.172.10/24
+diff --git a/playbooks/ceph-install.yml b/playbooks/ceph-install.yml
+index 088a71c1..bdb2aa98 100644
+--- a/playbooks/ceph-install.yml
++++ b/playbooks/ceph-install.yml
+@@ -15,7 +15,6 @@
+
+ - name: Install ceph mons
+ hosts: ceph-mon
+- user: root
+ pre_tasks:
+ - include: common-tasks/os-log-dir-setup.yml
+ vars:
+@@ -121,7 +120,6 @@
+
+ - name: Install ceph osds
+ hosts: ceph-osd
+- user: root
+ pre_tasks:
+ - include: common-tasks/os-log-dir-setup.yml
+ vars:
+diff --git a/playbooks/common-playbooks/cinder.yml b/playbooks/common-playbooks/cinder.yml
+index ca8a991d..f4d59f1f 100644
+--- a/playbooks/common-playbooks/cinder.yml
++++ b/playbooks/common-playbooks/cinder.yml
+@@ -17,7 +17,6 @@
+ hosts: "{{ cinder_hosts }}"
+ serial: "{{ cinder_serial }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+diff --git a/playbooks/common-playbooks/glance.yml b/playbooks/common-playbooks/glance.yml
+index f16fa1ee..21babc7b 100644
+--- a/playbooks/common-playbooks/glance.yml
++++ b/playbooks/common-playbooks/glance.yml
+@@ -17,7 +17,6 @@
+ hosts: "{{ glance_hosts }}"
+ serial: "{{ glance_serial }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - glance
+diff --git a/playbooks/common-playbooks/neutron.yml b/playbooks/common-playbooks/neutron.yml
+index a9c7402c..bbcc7b41 100644
+--- a/playbooks/common-playbooks/neutron.yml
++++ b/playbooks/common-playbooks/neutron.yml
+@@ -17,7 +17,6 @@
+ hosts: "{{ neutron_hosts }}"
+ serial: "{{ neutron_serial }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - neutron
+@@ -88,6 +87,7 @@
+ neutron_overlay_network: "{{ _overlay_network }}"
+ neutron_provider_networks: "{{ _provider_networks }}"
+ neutron_local_ip: "{{ tunnel_address }}"
++ when: (reinitialized_nodes is not defined and scaled_out_nodes is not defined) or (reinitialized_nodes is defined and hostname in reinitialized_nodes) or (scaled_out_nodes is defined and hostname in scaled_out_nodes)
+
+ - role: "bird"
+ when:
+diff --git a/playbooks/common-playbooks/nova.yml b/playbooks/common-playbooks/nova.yml
+index 20fa5ea5..fb8c800a 100644
+--- a/playbooks/common-playbooks/nova.yml
++++ b/playbooks/common-playbooks/nova.yml
+@@ -17,7 +17,6 @@
+ hosts: "{{ nova_hosts }}"
+ serial: "{{ nova_serial }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ vars_files:
+ - ../defaults/repo_packages/nova_consoles.yml
+diff --git a/playbooks/common-tasks/mysql-db-user.yml b/playbooks/common-tasks/mysql-db-user.yml
+index 237140f3..fb54a1de 100644
+--- a/playbooks/common-tasks/mysql-db-user.yml
++++ b/playbooks/common-tasks/mysql-db-user.yml
+@@ -36,6 +36,6 @@
+ priv: "{{ db_name }}.*:ALL"
+ append_privs: "{{ db_append_privs | default(omit) }}"
+ delegate_to: "{{ groups['galera_all'][0] }}"
+- with_items: "{{ grant_list | default(['localhost', '%']) }}"
++ with_items: "{{ grant_list | default(['localhost', '%', galera_address, ansible_host]) }}"
+ tags:
+ - common-mysql
+diff --git a/playbooks/dump_vars.yaml b/playbooks/dump_vars.yaml
+new file mode 100644
+index 00000000..d38b7de1
+--- /dev/null
++++ b/playbooks/dump_vars.yaml
+@@ -0,0 +1,6 @@
++---
++- name: dumping vars
++ hosts: base
++ tasks:
++ - name: Dump all vars
++ action: template src=dumpall.j2 dest=/tmp/ansible.all
+diff --git a/playbooks/dumpall.j2 b/playbooks/dumpall.j2
+new file mode 100644
+index 00000000..e1264a30
+--- /dev/null
++++ b/playbooks/dumpall.j2
+@@ -0,0 +1,19 @@
++Module Variables ("vars"):
++--------------------------------
++{{ vars | to_nice_json }}
++
++Environment Variables ("environment"):
++--------------------------------
++{{ environment | to_nice_json }}
++
++GROUP NAMES Variables ("group_names"):
++--------------------------------
++{{ group_names | to_nice_json }}
++
++GROUPS Variables ("groups"):
++--------------------------------
++{{ groups | to_nice_json }}
++
++HOST Variables ("hostvars"):
++--------------------------------
++{{ hostvars | to_nice_json }}
+diff --git a/playbooks/etcd-install.yml b/playbooks/etcd-install.yml
+index 762fa019..03b9785e 100644
+--- a/playbooks/etcd-install.yml
++++ b/playbooks/etcd-install.yml
+@@ -16,7 +16,6 @@
+ - name: Install etcd server cluster
+ hosts: etcd_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/unbound-clients.yml
+diff --git a/playbooks/galera-install.yml b/playbooks/galera-install.yml
+index b7a0d2b3..d339343e 100644
+--- a/playbooks/galera-install.yml
++++ b/playbooks/galera-install.yml
+@@ -12,27 +12,30 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: initial_poweroff_hosts.yml, memcached-install.yml
+
+ - name: Galera container config
+ hosts: galera_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+ serial: 1
+- user: root
+ tasks:
+ - include: common-tasks/os-log-dir-setup.yml
+ vars:
+ log_dirs:
+ - src: "/openstack/log/{{ inventory_hostname }}-mysql_logs"
+ dest: "/var/log/mysql_logs"
++ when: reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ vars:
+ list_of_bind_mounts: "{{ galera_container_bind_mounts }}"
+ extra_container_config_no_restart:
+ - "lxc.start.order=10"
++ when: reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ - include: common-tasks/unbound-clients.yml
+ static: no
+ when:
+ - hostvars['localhost']['resolvconf_enabled'] | bool
++ - reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - galera
+@@ -41,17 +44,9 @@
+ hosts: galera_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+ serial: 1
+- user: root
+ roles:
+- - role: haproxy_endpoints
+- haproxy_state: disabled
+- static: no
+- when: "groups['haproxy'] | default([]) | length > 0"
+ - role: "galera_server"
+- - role: haproxy_endpoints
+- haproxy_state: enabled
+- static: no
+- when: "groups['haproxy'] | default([]) | length > 0"
++ when: reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ - role: "rsyslog_client"
+ rsyslog_client_log_rotate_file: galera_log_rotate
+ rsyslog_client_log_dir: "/var/log/mysql_logs"
+@@ -61,12 +56,29 @@
+ rsyslog_client_config_name: "99-galera-rsyslog-client.conf"
+ tags:
+ - rsyslog
++ when: reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ - role: "system_crontab_coordination"
+ tags:
+ - crontab
++ when: reinitialized_nodes is not defined or (reinitialized_nodes is defined and hostname in reinitialized_nodes)
+ vars:
+ galera_server_id: "{{ inventory_hostname | string_2_int }}"
+ galera_wsrep_node_name: "{{ container_name }}"
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - galera
++
++- name: Verify Galera cluster count.
++ hosts: baremetal-infra_hosts
++ tasks:
++ - set_fact:
++ given_cluster_count: "{{ groups['galera_all'] | count }}"
++
++ - name: Check the mysql cluster count
++ shell: mysql -e 'show status like "%wsrep_cluster_%";' | awk -F "wsrep_cluster_size" {'print $2'} |tr -d " \t\n\r"
++ register: cluster_count
++ until: cluster_count|success
++ failed_when: cluster_count.stdout != given_cluster_count
++ tags:
++ - galera
++
+diff --git a/playbooks/haproxy-install.yml b/playbooks/haproxy-install.yml
+index 49eaf35b..bf16e237 100644
+--- a/playbooks/haproxy-install.yml
++++ b/playbooks/haproxy-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: baremetal-interface-config.yml,provisioning_done.yml
+
+ - name: haproxy base config
+ hosts: haproxy
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/os-log-dir-setup.yml
+@@ -28,12 +28,48 @@
+ static: no
+ when:
+ - hostvars['localhost']['resolvconf_enabled'] | bool
+- roles:
+- - role: "keepalived"
+- when: haproxy_use_keepalived | bool
+- tags:
+- - keepalived
+ environment: "{{ deployment_environment_variables | default({}) }}"
++
++ tasks:
++ - name: Check if hasmanager is enabled
++ stat:
++ path: "/usr/lib/systemd/system/hasmanager.service"
++ register: stat_hasmanager
++
++ - name: Fetch external IPs on RTE
++ shell: "ip a s {{haproxy_keepalived_external_interface}} | grep inet | awk -F ' ' {'print $2'}"
++ register: extipresult
++
++ - name: Add External IPv4 VIP on installation controller
++ command: "ip a a {{external_lb_vip_address}}/32 dev {{haproxy_keepalived_external_interface}}"
++ when:
++ - installation_controller == inventory_hostname
++ - haproxy_use_keepalived == False
++ - stat_hasmanager.stat.exists == False
++ - external_lb_vip_address + '/32' not in extipresult.stdout_lines
++ - (external_lb_vip_address | ipv4)
++
++ - name: Add External IPv6 VIP on installation controller
++ command: "ip a a {{external_lb_vip_address}}/128 dev {{haproxy_keepalived_external_interface}}"
++ when:
++ - installation_controller == inventory_hostname
++ - haproxy_use_keepalived == False
++ - stat_hasmanager.stat.exists == False
++ - external_lb_vip_address + '/128' not in extipresult.stdout_lines
++ - (external_lb_vip_address | ipv6)
++
++ - name: Fetch internal IPs on RTE
++ shell: "ip -4 a s {{haproxy_keepalived_internal_interface}} | grep inet | awk -F ' ' {'print $2'}"
++ register: intipresult
++
++ - name: Add Internal VIP on installation controller
++ command: "ip a a {{internal_lb_vip_address}}/32 dev {{haproxy_keepalived_internal_interface}}"
++ when:
++ - installation_controller == inventory_hostname
++ - haproxy_use_keepalived == False
++ - stat_hasmanager.stat.exists == False
++ - internal_lb_vip_address + '/32' not in intipresult.stdout_lines
++
+ tags:
+ - haproxy-config
+ - haproxy
+@@ -41,7 +77,6 @@
+ - name: Install haproxy
+ hosts: haproxy
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - name: Remove legacy haproxy configuration files
+ file:
+diff --git a/playbooks/hosts_config.yml b/playbooks/hosts_config.yml
+new file mode 100644
+index 00000000..9e6970fa
+--- /dev/null
++++ b/playbooks/hosts_config.yml
+@@ -0,0 +1,33 @@
++---
++# Copyright 2014, Rackspace US, Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# cmframework.requires: baremetal-interface-config.yml
++
++- name: Setup /etc/hosts file
++ hosts: lxc_hosts
++ tasks:
++ - name: Fix /etc/hosts
++ lineinfile:
++ dest: /etc/hosts
++ state: present
++ line: "{{ hostvars[item]['ansible_host'] }} {{ hostvars[item]['ansible_hostname'] }} {{ hostvars[item]['ansible_hostname'].split('.')[0] }}"
++ with_items: "{{ groups['all'] }}"
++ when:
++ - item != 'localhost'
++ - name: Ensure localhost /etc/hosts entry is correct
++ lineinfile:
++ dest: /etc/hosts
++ state: present
++ line: '127.0.0.1 localhost'
++ regexp: '^127.0.0.1'
+diff --git a/playbooks/memcached-install.yml b/playbooks/memcached-install.yml
+index aa37e616..7e3e5b67 100644
+--- a/playbooks/memcached-install.yml
++++ b/playbooks/memcached-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: baremetal-interface-config.yml
+
+ - name: Install memcached
+ hosts: memcached
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/os-log-dir-setup.yml
+diff --git a/playbooks/ntp-config.yml b/playbooks/ntp-config.yml
+new file mode 100644
+index 00000000..6360f977
+--- /dev/null
++++ b/playbooks/ntp-config.yml
+@@ -0,0 +1,40 @@
++---
++# Copyright 2014, Rackspace US, Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++# cmframework.requires: baremetal-interface-config.yml
++
++- name: Configure NTP service on controllers
++ hosts: shared-infra_hosts
++ gather_facts: "{{ osa_gather_facts | default(True) }}"
++ max_fail_percentage: 20
++ tasks:
++ - name: Set Timezone
++ timezone:
++ name: "{{ time.zone }}"
++ roles:
++ - role: "ntp"
++ ntp_config_server: "{{ ntp_servers }}"
++
++- name: Configure NTP service on base hosts
++ hosts: [ base ]
++ gather_facts: "{{ osa_gather_facts | default(True) }}"
++ max_fail_percentage: 20
++ tasks:
++ - name: Set Timezone
++ timezone:
++ name: "{{ time.zone }}"
++ roles:
++ - role: "ntp"
++ ntp_config_server: "{{ groups['galera_all'] }}"
++ when: ansible_hostname not in groups['management']
+diff --git a/playbooks/openstack-hosts-setup.yml b/playbooks/openstack-hosts-setup.yml
+index 4979b635..4f9405cc 100644
+--- a/playbooks/openstack-hosts-setup.yml
++++ b/playbooks/openstack-hosts-setup.yml
+@@ -22,7 +22,6 @@
+ - name: Install Ansible prerequisites
+ hosts: "{{ openstack_host_group|default('hosts') }}"
+ gather_facts: false
+- user: root
+ pre_tasks:
+ - name: Ensure python is installed
+ register: result
+@@ -39,7 +38,6 @@
+ - name: Basic host setup
+ hosts: "{{ openstack_host_group|default('hosts') }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - name: Check for a supported Operating System
+ assert:
+diff --git a/playbooks/os-aodh-install.yml b/playbooks/os-aodh-install.yml
+index 311f63c6..a3c91573 100644
+--- a/playbooks/os-aodh-install.yml
++++ b/playbooks/os-aodh-install.yml
+@@ -13,10 +13,10 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
++# cmframework.requires: os-horizon-install.yml
+ - name: Install the aodh components
+ hosts: aodh_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+@@ -48,6 +48,7 @@
+ - hostvars['localhost']['resolvconf_enabled'] | bool
+ roles:
+ - role: "os_aodh"
++ when: (reinitialized_nodes is not defined and scaled_out_nodes is not defined) or (reinitialized_nodes is defined and hostname in reinitialized_nodes) or (scaled_out_nodes is defined and hostname in scaled_out_nodes)
+ - role: "openstack_openrc"
+ tags:
+ - openrc
+diff --git a/playbooks/os-barbican-install.yml b/playbooks/os-barbican-install.yml
+index 3f433894..2b5203d2 100644
+--- a/playbooks/os-barbican-install.yml
++++ b/playbooks/os-barbican-install.yml
+@@ -16,7 +16,6 @@
+ - name: Installation and setup of barbican
+ hosts: barbican_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-ceilometer-install.yml b/playbooks/os-ceilometer-install.yml
+index 745a9b37..58b08bb5 100644
+--- a/playbooks/os-ceilometer-install.yml
++++ b/playbooks/os-ceilometer-install.yml
+@@ -16,7 +16,6 @@
+ - name: Install the ceilometer components
+ hosts: ceilometer_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-cinder-install.yml b/playbooks/os-cinder-install.yml
+index 23f83b8e..09194d7e 100644
+--- a/playbooks/os-cinder-install.yml
++++ b/playbooks/os-cinder-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml,os-neutron-install.yml
+
+ - name: Prepare MQ/DB services
+ hosts: cinder_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+@@ -96,7 +96,6 @@
+ - name: Refresh local facts after all software changes are made
+ hosts: cinder_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+@@ -139,7 +138,6 @@
+ hosts: cinder_backup,cinder_volume,cinder_scheduler
+ gather_facts: no
+ serial: "{{ cinder_backend_serial | default(['1', '100%']) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+@@ -164,7 +162,6 @@
+ hosts: cinder_api
+ gather_facts: no
+ serial: "{{ cinder_api_serial | default(['1','100%']) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+@@ -203,7 +200,6 @@
+ - name: Perform online database migrations
+ hosts: cinder_api[0]
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - cinder
+diff --git a/playbooks/os-designate-install.yml b/playbooks/os-designate-install.yml
+index ed3b81da..a5174ea5 100644
+--- a/playbooks/os-designate-install.yml
++++ b/playbooks/os-designate-install.yml
+@@ -19,7 +19,6 @@
+ - name: Install designate server
+ hosts: designate_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-glance-install.yml b/playbooks/os-glance-install.yml
+index fdd2b1c2..3f46655e 100644
+--- a/playbooks/os-glance-install.yml
++++ b/playbooks/os-glance-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml
+
+ - name: Prepare MQ/DB services
+ hosts: glance_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - glance
+@@ -80,7 +80,6 @@
+ - name: Refresh local facts after all software changes are made
+ hosts: glance_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - glance
+@@ -122,7 +121,6 @@
+ hosts: glance_api
+ gather_facts: no
+ serial: "{{ glance_api_serial | default(['1','100%']) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - glance
+diff --git a/playbooks/os-gnocchi-install.yml b/playbooks/os-gnocchi-install.yml
+index 247ffa23..bbad3819 100644
+--- a/playbooks/os-gnocchi-install.yml
++++ b/playbooks/os-gnocchi-install.yml
+@@ -15,7 +15,6 @@
+
+ - name: Install Gnocchi components
+ hosts: gnocchi_all
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ static: no
+diff --git a/playbooks/os-heat-install.yml b/playbooks/os-heat-install.yml
+index aabcde88..eb0c1cde 100644
+--- a/playbooks/os-heat-install.yml
++++ b/playbooks/os-heat-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml,os-neutron-install.yml,os-swift-install.yml,os-ironic-install.yml
+
+ - name: Install heat server
+ hosts: heat_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-horizon-install.yml b/playbooks/os-horizon-install.yml
+index bc538938..5020e9e9 100644
+--- a/playbooks/os-horizon-install.yml
++++ b/playbooks/os-horizon-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml,os-neutron-install.yml,os-swift-install.yml,os-ironic-install.yml,os-heat-install.yml
+
+ - name: Install horizon server
+ hosts: horizon_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/os-log-dir-setup.yml
+@@ -38,6 +38,7 @@
+ - hostvars['localhost']['resolvconf_enabled'] | bool
+ roles:
+ - role: "os_horizon"
++ when: (reinitialized_nodes is not defined and scaled_out_nodes is not defined) or (reinitialized_nodes is defined and hostname in reinitialized_nodes) or (scaled_out_nodes is defined and hostname in scaled_out_nodes)
+ - role: "rsyslog_client"
+ rsyslog_client_log_rotate_file: horizon_log_rotate
+ rsyslog_client_log_dir: "/var/log/horizon"
+diff --git a/playbooks/os-ironic-install.yml b/playbooks/os-ironic-install.yml
+index 6c869687..c174c704 100644
+--- a/playbooks/os-ironic-install.yml
++++ b/playbooks/os-ironic-install.yml
+@@ -1,3 +1,4 @@
++#cmframework.requires: haproxy-install.yml, rabbitmq-install.yml, os-keystone-install.yml
+ ---
+ # Copyright 2016, Rackspace, Inc.
+ #
+@@ -12,11 +13,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml,os-neutron-install.yml,os-swift-install.yml
+
+ - name: Installation and setup of Ironic
+- hosts: ironic_all
++ hosts: baremetal_management_nodes
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-keystone-install.yml b/playbooks/os-keystone-install.yml
+index 7d1d9a37..6de4cdb6 100644
+--- a/playbooks/os-keystone-install.yml
++++ b/playbooks/os-keystone-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml
+
+ - name: Prepare MQ/DB services
+ hosts: keystone_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - keystone
+@@ -62,7 +62,6 @@
+ hosts: keystone_all
+ serial: "{{ keystone_serial | default(['1', '100%']) }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - keystone
+@@ -117,6 +116,7 @@
+
+ roles:
+ - role: "os_keystone"
++ when: (reinitialized_nodes is not defined and scaled_out_nodes is not defined) or ((reinitialized_nodes is defined) and ((reinitialized_nodes | intersect(groups['keystone_all'])) | length > 0)) or (scaled_out_nodes is defined and hostname in scaled_out_nodes)
+ - role: "openstack_openrc"
+ tags:
+ - openrc
+@@ -150,7 +150,6 @@
+ - name: Finalise data migrations if required
+ hosts: keystone_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - keystone
+diff --git a/playbooks/os-magnum-install.yml b/playbooks/os-magnum-install.yml
+index 37d3eb56..743a8131 100644
+--- a/playbooks/os-magnum-install.yml
++++ b/playbooks/os-magnum-install.yml
+@@ -18,7 +18,6 @@
+
+ - name: Install magnum server
+ hosts: magnum_all
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/os-log-dir-setup.yml
+diff --git a/playbooks/os-neutron-install.yml b/playbooks/os-neutron-install.yml
+index 43288f7f..b9e71250 100644
+--- a/playbooks/os-neutron-install.yml
++++ b/playbooks/os-neutron-install.yml
+@@ -12,6 +12,7 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml
+
+ - name: Configure Neutron dynamic host groupings
+ hosts: localhost
+@@ -29,7 +30,6 @@
+ - name: Prepare MQ/DB services
+ hosts: neutron_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - neutron
+diff --git a/playbooks/os-nova-install.yml b/playbooks/os-nova-install.yml
+index be6414e9..129dbe3f 100644
+--- a/playbooks/os-nova-install.yml
++++ b/playbooks/os-nova-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml
+
+ - name: Prepare MQ/DB services
+ hosts: nova_conductor
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - nova
+@@ -125,7 +125,6 @@
+ - name: Refresh local facts after all software changes are made
+ hosts: nova_all
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - nova
+@@ -170,7 +169,6 @@
+ hosts: "nova_all:!nova_api_placement:!nova_console"
+ gather_facts: no
+ serial: "{{ nova_serial | default('100%') }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - nova
+@@ -194,7 +192,6 @@
+ hosts: "nova_api_placement:nova_console"
+ gather_facts: no
+ serial: "{{ nova_api_serial | default(['1', '100%']) }}"
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - nova
+@@ -214,7 +211,6 @@
+ - name: Perform online database migrations
+ hosts: nova_conductor
+ gather_facts: no
+- user: root
+ environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - nova
+diff --git a/playbooks/os-rally-install.yml b/playbooks/os-rally-install.yml
+index 2fef255d..b961ab9d 100644
+--- a/playbooks/os-rally-install.yml
++++ b/playbooks/os-rally-install.yml
+@@ -16,7 +16,6 @@
+ - name: Installation and setup of Rally
+ hosts: utility_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: common-tasks/unbound-clients.yml
+ static: no
+diff --git a/playbooks/os-sahara-install.yml b/playbooks/os-sahara-install.yml
+index 09dc410b..b7e0fcb0 100644
+--- a/playbooks/os-sahara-install.yml
++++ b/playbooks/os-sahara-install.yml
+@@ -14,7 +14,6 @@
+ - name: Install sahara server
+ hosts: sahara_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/os-swift-install.yml b/playbooks/os-swift-install.yml
+index 87c11de9..5974ebef 100644
+--- a/playbooks/os-swift-install.yml
++++ b/playbooks/os-swift-install.yml
+@@ -12,11 +12,11 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml,rsyslog-install.yml,os-keystone-install.yml,os-glance-install.yml,os-nova-install.yml,os-neutron-install.yml
+
+ - name: Installation and setup of Swift
+ hosts: swift_all:swift_remote_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ vars:
+@@ -59,7 +59,6 @@
+
+ - name: Installation and setup of Swift
+ hosts: swift_all
+- user: root
+ roles:
+ - role: "rsyslog_client"
+ rsyslog_client_log_rotate_file: swift_log_rotate
+diff --git a/playbooks/os-swift-sync.yml b/playbooks/os-swift-sync.yml
+index 7d9f7a2e..eafadca2 100644
+--- a/playbooks/os-swift-sync.yml
++++ b/playbooks/os-swift-sync.yml
+@@ -18,7 +18,6 @@
+ - name: Synchronisation of swift ring and ssh keys
+ hosts: swift_all:swift_remote_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ roles:
+ - role: "os_swift"
+ swift_do_setup: False
+diff --git a/playbooks/os-tempest-install.yml b/playbooks/os-tempest-install.yml
+index e8b45434..6493107c 100644
+--- a/playbooks/os-tempest-install.yml
++++ b/playbooks/os-tempest-install.yml
+@@ -16,7 +16,6 @@
+ - name: Installation and setup of Tempest
+ hosts: utility_all[0]
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ roles:
+ - role: "os_tempest"
+ - role: "rsyslog_client"
+diff --git a/playbooks/os-trove-install.yml b/playbooks/os-trove-install.yml
+index d62e7125..64635664 100644
+--- a/playbooks/os-trove-install.yml
++++ b/playbooks/os-trove-install.yml
+@@ -19,7 +19,6 @@
+ - name: Install trove server
+ hosts: trove_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ - include: common-tasks/rabbitmq-vhost-user.yml
+diff --git a/playbooks/presetup-playbook.yml b/playbooks/presetup-playbook.yml
+new file mode 100644
+index 00000000..0c01a6a1
+--- /dev/null
++++ b/playbooks/presetup-playbook.yml
+@@ -0,0 +1,19 @@
++- name: Presetup tasks for installation controller node
++ hosts: localhost
++ gather_facts: True
++ user: root
++ tasks:
++ - name: Change ansible configuration
++ ini_file:
++ path: "/etc/ansible/ansible.cfg"
++ section: "{{item.section}}"
++ option: "{{item.option}}"
++ value: "{{item.value}}"
++ with_items:
++ - { section: defaults, option: "hash_behaviour", value: "merge" }
++ - { section: defaults, option: "forks", value: 50 }
++ - { section: ssh_connection, option: "control_path", value: "%(directory)s/%%h" }
++
++ - name: Setup hostname for Installation controller.
++ hostname:
++ name: "{{ installation_controller }}"
+diff --git a/playbooks/rabbitmq-install.yml b/playbooks/rabbitmq-install.yml
+index 8920e407..a38f205b 100644
+--- a/playbooks/rabbitmq-install.yml
++++ b/playbooks/rabbitmq-install.yml
+@@ -12,12 +12,12 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-remove-reinitialized.yml
+
+ - name: Create and configure rabbitmq container
+ hosts: "{{ rabbitmq_host_group | default('rabbitmq_all') }}"
+ serial: 1
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+ static:
+@@ -38,7 +38,6 @@
+ # http://www.rabbitmq.com/clustering.html#upgrading
+ - name: Stop RabbitMQ nodes that are not the upgrader
+ hosts: "{{ rabbitmq_host_group | default('rabbitmq_all') }}[1:]"
+- user: root
+ tasks:
+ - name: "Stop RabbitMQ"
+ service:
+@@ -49,7 +48,6 @@
+ - name: Install rabbitmq server
+ hosts: "{{ rabbitmq_host_group | default('rabbitmq_all') }}"
+ serial: 20%
+- user: root
+ roles:
+ - role: "rabbitmq_server"
+ - role: "rsyslog_client"
+@@ -64,7 +62,6 @@
+
+ - name: Ensure rabbitmq user for monitoring GUI
+ hosts: "{{ rabbitmq_host_group | default('rabbitmq_all') }}[0]"
+- user: root
+ tasks:
+ - name: Create rabbitmq user for monitoring GUI
+ rabbitmq_user:
+diff --git a/playbooks/rabbitmq-remove-reinitialized.yml b/playbooks/rabbitmq-remove-reinitialized.yml
+new file mode 100644
+index 00000000..5bed7aed
+--- /dev/null
++++ b/playbooks/rabbitmq-remove-reinitialized.yml
+@@ -0,0 +1,12 @@
++#cmframework.requires: provisioning_done.yml
++---
++- name: Remove reinitialized nodes from rabbitmq
++ hosts: [management]
++ tasks:
++ - shell: |
++ /sbin/rabbitmqctl cluster_status | grep {{item.key}}
++ if [ $? -eq 0 ]; then
++ /sbin/rabbitmqctl forget_cluster_node rabbit@{{item.key}}
++ fi
++ with_dict: "{{ reinitialized_nodes }}"
++ when: reinitialized_nodes is defined and inventory_hostname == installation_controller
+diff --git a/playbooks/repo-server.yml b/playbooks/repo-server.yml
+index ddb70b31..31b4c4b4 100644
+--- a/playbooks/repo-server.yml
++++ b/playbooks/repo-server.yml
+@@ -16,7 +16,6 @@
+ - name: Setup repo servers
+ hosts: repo_all
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ pre_tasks:
+
+ - include: common-tasks/set-upper-constraints.yml
+diff --git a/playbooks/rsyslog-install.yml b/playbooks/rsyslog-install.yml
+index 03a1b706..aa332669 100644
+--- a/playbooks/rsyslog-install.yml
++++ b/playbooks/rsyslog-install.yml
+@@ -12,30 +12,9 @@
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
++# cmframework.requires: memcached-install.yml,galera-install.yml,rabbitmq-install.yml
+
+ - name: Install rsyslog
+ hosts: rsyslog
+- gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+- pre_tasks:
+- - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+- vars:
+- list_of_bind_mounts:
+- - bind_dir_path: "{{ rsyslog_server_storage_directory }}"
+- mount_path: "/openstack/{{ inventory_hostname }}/log-storage"
+- extra_container_config_no_restart:
+- - "lxc.start.order=19"
+- - include: common-tasks/unbound-clients.yml
+- static: no
+- when:
+- - hostvars['localhost']['resolvconf_enabled'] | bool
+- roles:
+- - role: "rsyslog_server"
+- tags:
+- - rsyslog
+- - role: "system_crontab_coordination"
+- tags:
+- - crontab
+- environment: "{{ deployment_environment_variables | default({}) }}"
+ tags:
+ - rsyslog
+diff --git a/playbooks/security-hardening.yml b/playbooks/security-hardening.yml
+index 156aca8e..c9839738 100644
+--- a/playbooks/security-hardening.yml
++++ b/playbooks/security-hardening.yml
+@@ -20,7 +20,6 @@
+ - name: Apply security hardening configurations
+ hosts: "{{ security_host_group|default('hosts') }}"
+ gather_facts: "{{ osa_gather_facts | default(True) }}"
+- user: root
+ roles:
+ - role: "ansible-hardening"
+ when: apply_security_hardening | bool
+diff --git a/playbooks/setup-infrastructure.yml b/playbooks/setup-infrastructure.yml
+index 81f45499..dd4cec57 100644
+--- a/playbooks/setup-infrastructure.yml
++++ b/playbooks/setup-infrastructure.yml
+@@ -13,17 +13,16 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-- include: unbound-install.yml
+-- include: repo-install.yml
++#- include: unbound-install.yml
+ - include: haproxy-install.yml
+ # TODO(evrardjp): Remove the following when repo_build is done
+ # before lxc_container_create, and haproxy is moved with it as
+ # second step.
+ - include: repo-use.yml
+-- include: utility-install.yml
++#- include: utility-install.yml
+ - include: memcached-install.yml
+ - include: galera-install.yml
+ - include: rabbitmq-install.yml
+-- include: etcd-install.yml
+-- include: ceph-install.yml
++#- include: etcd-install.yml
++#- include: ceph-install.yml
+ - include: rsyslog-install.yml
+diff --git a/tests/roles/bootstrap-host/templates/user_variables_translations.yml.j2 b/playbooks/setup-playbook.yml
+similarity index 61%
+rename from tests/roles/bootstrap-host/templates/user_variables_translations.yml.j2
+rename to playbooks/setup-playbook.yml
+index 82cf3703..3685b5d8 100644
+--- a/tests/roles/bootstrap-host/templates/user_variables_translations.yml.j2
++++ b/playbooks/setup-playbook.yml
+@@ -1,5 +1,5 @@
+ ---
+-# Copyright 2017, Logan Vig <logan2211@gmail.com>
++# Copyright 2015, Rackspace US, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+@@ -13,10 +13,10 @@
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+-# Trove settings for translations site.
+-trove_provider_net_name: flat-db
+-trove_service_net_phys_net: flat-db
+-trove_service_net_setup: True
+-trove_service_net_subnet_cidr: "172.29.232.0/22"
+-trove_service_net_allocation_pool_start: "172.29.233.110"
+-trove_service_net_allocation_pool_end: "172.29.233.200"
++- name: Bootstrap the controller-1 node
++ hosts: all:!localhost
++ gather_facts: True
++ user: root
++ roles:
++ - role: "secretsextend"
++ - role: "bootstrap-host"
+diff --git a/playbooks/utility-install.yml b/playbooks/utility-install.yml
+index b58fa142..9040a5f2 100644
+--- a/playbooks/utility-install.yml
++++ b/playbooks/utility-install.yml
+@@ -15,7 +15,6 @@
+
+ - name: Setup the utility location(s)
+ hosts: utility_all
+- user: root
+ pre_tasks:
+ - include: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
+
+diff --git a/scripts/openstack-ansible b/scripts/openstack-ansible
+new file mode 100755
+index 00000000..6295b2df
+--- /dev/null
++++ b/scripts/openstack-ansible
+@@ -0,0 +1,67 @@
++#!/usr/bin/env bash
++# Copyright 2014, Rackspace US, Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++#
++# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
++
++# OpenStack wrapper tool to ease the use of ansible with multiple variable files.
++
++export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/root/bin"
++
++function info() {
++ if [ "${ANSIBLE_NOCOLOR:-0}" -eq "1" ]; then
++ echo -e "${@}"
++ else
++ echo -e "\e[0;35m${@}\e[0m"
++ fi
++}
++
++# Figure out which Ansible binary was executed
++RUN_CMD=$(basename ${0})
++
++# Apply the OpenStack-Ansible configuration selectively.
++if [[ "${PWD}" == *"/opt/openstack-ansible"* ]] || [ "${RUN_CMD}" == "openstack-ansible" ]; then
++
++ # Source the Ansible configuration.
++ . /usr/local/bin/openstack-ansible.rc
++
++ # Check whether there are any user configuration files
++ if ls -1 /etc/openstack_deploy/user_*.yml &> /dev/null; then
++
++ # Discover the variable files.
++ VAR1="$(for i in $(ls /etc/openstack_deploy/user_*.yml); do echo -ne "-e @$i "; done)"
++ fi
++
++else
++
++ # If you're not executing 'openstack-ansible' and are
++ # not in the OSA git clone root, then do not source
++ # the configuration and do not add extra vars.
++ VAR1=""
++
++fi
++
++VAR1=${VAR1}" --skip-tags pip,install-apt,install-yum,memcached_server-install,haproxy_package-install,galera-server-pip-packages,galera-gpg-keys,galera-repos,galera-pre-yum-packages,galera-client-install,galera_server-install,rabbitmq-yum-packages,rabbitmq-plugin-config,common-log,bird,rsyslog-yum-packages,glance-install,cinder-install,nova-install,nova-pip-packages,nova-novnc-git,calico-pip-packages,dragonflow-pip-packages,neutron-pkg-install,heat-pkg-install,horizon-pkg-install,swift-pkg-install,swift-chk-hashes,ironic-l2,ironic-pkg-install,ceph-install,package-install -e @/etc/openstack_deploy/env.d/baremetal.yml -e @/etc/openstack_deploy/user_variables.yml -e @/etc/openstack_deploy/user_secrets.yml"
++
++# Provide information on the discovered variables.
++info "Variable files: \"${VAR1}\""
++
++SKIP_OPTS=""
++
++# Execute the Ansible command.
++if [ "${RUN_CMD}" == "openstack-ansible" ] || [ "${RUN_CMD}" == "ansible-playbook" ]; then
++ /usr/bin/ansible-playbook "${@}" ${VAR1} ${SKIP_OPTS}
++else
++ /usr/bin/${RUN_CMD} "${@}" ${SKIP_OPTS}
++fi
+diff --git a/scripts/openstack-ansible.rc b/scripts/openstack-ansible.rc
+index a24c4a42..e8796964 100644
+--- a/scripts/openstack-ansible.rc
++++ b/scripts/openstack-ansible.rc
+@@ -14,12 +14,15 @@
+
+ export ANSIBLE_RETRY_FILES_ENABLED="${ANSIBLE_RETRY_FILES_ENABLED:-False}"
+
+-export ANSIBLE_INVENTORY="${ANSIBLE_INVENTORY:-OSA_INVENTORY_PATH/dynamic_inventory.py,/etc/openstack_deploy/inventory.ini}"
++export ANSIBLE_INVENTORY="${ANSIBLE_INVENTORY:-/opt/openstack-ansible/inventory}"
+
+ export ANSIBLE_LOG_PATH="${ANSIBLE_LOG_PATH:-/openstack/log/ansible-logging/ansible.log}"
+-mkdir -p "$(dirname ${ANSIBLE_LOG_PATH})" || unset ANSIBLE_LOG_PATH
++sudo mkdir -p "$(dirname ${ANSIBLE_LOG_PATH})" || true
++sudo chmod o+rx /openstack/log || true
++sudo chown $USER:$USER "$(dirname ${ANSIBLE_LOG_PATH})" || unset ANSIBLE_LOG_PATH
+
+-export ANSIBLE_ROLES_PATH="${ANSIBLE_ROLES_PATH:-/etc/ansible/roles:OSA_PLAYBOOK_PATH/roles:/etc/ansible/roles/ceph-ansible/roles}"
++
++export ANSIBLE_ROLES_PATH="${ANSIBLE_ROLES_PATH:-/etc/ansible/roles:roles:/opt/openstack-ansible/playbooks/roles}"
+
+ export ANSIBLE_LIBRARY="${ANSIBLE_LIBRARY:-/etc/ansible/roles/plugins/library}"
+ export ANSIBLE_LOOKUP_PLUGINS="${ANSIBLE_LOOKUP_PLUGINS:-/etc/ansible/roles/plugins/lookup}"
+@@ -58,5 +61,5 @@ export ANSIBLE_FORCE_HANDLERS="${ANSIBLE_FORCE_HANDLERS:-True}"
+ # Allow the usage of userspace group_vars host_vars with user
+ # defined precedence until this behavior is merged in the
+ # inventory
+-export GROUP_VARS_PATH="${GROUP_VARS_PATH:-OSA_GROUP_VARS_DIR:/etc/openstack_deploy/group_vars/}"
+-export HOST_VARS_PATH="${HOST_VARS_PATH:-OSA_HOST_VARS_DIR:/etc/openstack_deploy/host_vars/}"
++export GROUP_VARS_PATH="${GROUP_VARS_PATH:-/opt/openstack-ansible/inventory/group_vars/}"
++export HOST_VARS_PATH="${HOST_VARS_PATH:-/opt/openstack-ansible/inventory/host_vars/}"
+diff --git a/scripts/pw-token-gen.py b/scripts/pw-token-gen.py
+index 6638d027..ed47c79f 100755
+--- a/scripts/pw-token-gen.py
++++ b/scripts/pw-token-gen.py
+@@ -1,4 +1,4 @@
+-#!/opt/ansible-runtime/bin/python
++#!/usr/bin/env python
+ # Copyright 2014, Rackspace US, Inc.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+@@ -86,7 +86,7 @@ class CredentialGenerator(object):
+
+ :param encoded_bytes: ``str`` must be at least 64 charters long
+ """
+- return encoded_bytes[:random.randrange(16, 64)]
++ return "Z-"+encoded_bytes[:random.randrange(14, 62)]
+
+ def _token_gen(self, encoded_bytes):
+ """Returns ``str`` with a length between 48 and 64.
+@@ -189,7 +189,7 @@ def main():
+ print('Creating backup file [ %s ]' % user_vars_tar_file)
+ # Create a tarball if needed
+ with tarfile.open(user_vars_tar_file, 'a') as tar:
+- os.chmod(user_vars_tar_file, 0o600)
++ os.chmod(user_vars_tar_file, 0o644)
+ basename = os.path.basename(user_vars_file)
+ # Time stamp the password file in UTC
+ utctime = datetime.datetime.utcnow()
+@@ -198,7 +198,7 @@ def main():
+ tar.add(user_vars_file, arcname=backup_name)
+
+ with open(user_vars_file, 'w') as f:
+- os.chmod(user_vars_file, 0o600)
++ os.chmod(user_vars_file, 0o644)
+ f.write(
+ yaml.safe_dump(
+ user_vars,
+diff --git a/scripts/setup-controller.sh b/scripts/setup-controller.sh
+new file mode 100755
+index 00000000..3dcbf4bf
+--- /dev/null
++++ b/scripts/setup-controller.sh
+@@ -0,0 +1,47 @@
++#!/usr/bin/env bash
++#
++# Copyright 2014, Rackspace US, Inc.
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++## Shell Opts ----------------------------------------------------------------
++set -e -u
++export ANSIBLE_LIBRARY="${ANSIBLE_LIBRARY:-/etc/ansible/roles/plugins/library}"
++export ANSIBLE_LOOKUP_PLUGINS="${ANSIBLE_LOOKUP_PLUGINS:-/etc/ansible/roles/plugins/lookup}"
++export ANSIBLE_FILTER_PLUGINS="${ANSIBLE_FILTER_PLUGINS:-/etc/ansible/roles/plugins/filter}"
++export ANSIBLE_ACTION_PLUGINS="${ANSIBLE_ACTION_PLUGINS:-/etc/ansible/roles/plugins/action}"
++export ANSIBLE_CALLBACK_PLUGINS="${ANSIBLE_CALLBACK_PLUGINS:-/etc/ansible/roles/plugins/callback}"
++export ANSIBLE_TEST_PLUGINS="${ANSIBLE_TEST_PLUGINS:-/etc/ansible/roles/plugins/test}"
++
++## Variables -----------------------------------------------------------------
++# Extra options to pass to the AIO bootstrap process
++export BOOTSTRAP_OPTS=${BOOTSTRAP_OPTS:-''}
++
++if [[ -v VNF_EMBEDDED_DEPLOYMENT ]] && [[ ${VNF_EMBEDDED_DEPLOYMENT} = 'true' ]]; then
++ SKIP_OPTS="--skip-tags check-requirements,deploy-confd,remove-packages,install-packages,prepare-ceph,prepare-loopback-nova,prepare-data-disk,prepare-loopback-swap,prepare-ssh-keys,prepare-networking,deploy-openstack-user-config,prepare-loopback-swift,prepare-os-net-config,ssh-key-authorized,check-disk-size"
++else
++ SKIP_OPTS="--skip-tags deploy-confd,remove-packages,install-packages,prepare-ceph,prepare-loopback-nova,prepare-data-disk,prepare-loopback-swap,prepare-ssh-keys,prepare-networking,deploy-openstack-user-config,prepare-loopback-swift"
++fi
++
++export CONFIG_PHASE='setup'
++## Main ----------------------------------------------------------------------
++
++# Run AIO bootstrap playbook
++if [ -z "${BOOTSTRAP_OPTS}" ]; then
++ ansible-playbook $1 \
++ -i /opt/cmframework/scripts/inventory.sh ${SKIP_OPTS}
++else
++ ansible-playbook $1 \
++ -i /opt/cmframework/scripts/inventory.sh \
++ -e "${BOOTSTRAP_OPTS}"
++fi
+diff --git a/tests/openstack_inventory_aio.json b/tests/openstack_inventory_aio.json
+new file mode 100644
+index 00000000..8d027878
+--- /dev/null
++++ b/tests/openstack_inventory_aio.json
+@@ -0,0 +1,1546 @@
++{
++ "_meta": {
++ "hostvars": {
++ "controller-1": {
++ "ansible_host": "172.29.236.11",
++ "cinder_backends": {
++ "lvm": {
++ "iscsi_ip_address": "172.29.236.11",
++ "volume_backend_name": "LVM_iSCSI",
++ "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
++ "volume_group": "cinder-volumes"
++ }
++ },
++ "component": "heat_engine",
++ "container_address": "172.29.236.11",
++ "container_name": "controller-1",
++ "container_networks": {
++ "eth0_address": {
++ "address": "172.29.236.11",
++ "bridge": "eth0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan10",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan20",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-1-host_containers",
++ "is_metal": true,
++ "physical_host": "controller-1",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ },
++ "swift_proxy_vars": {
++ "read_affinity": "r1=100",
++ "write_affinity": "r1",
++ "write_affinity_node_count": "1 * replicas"
++ },
++ "swift_vars": {
++ "region": 1,
++ "zone": 0
++ }
++ }
++ }
++ },
++ "all": {
++ "vars": {
++ "baremetal_flavor": [
++ {
++ "disk": 10,
++ "extra_specs": {
++ "capabilities:boot_option": "local",
++ "cpu_arch": "x86_64"
++ },
++ "name": "baremetal",
++ "ram": 13999,
++ "vcpus": 8
++ }
++ ],
++ "baremetal_images": [
++ {
++ "container_format": "bare",
++ "disk_format": "raw",
++ "file": "/opt/ncio/overcloudimages/guest-image.img",
++ "name": "golden"
++ },
++ {
++ "container_format": "aki",
++ "disk_format": "aki",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.kernel",
++ "name": "ipa-kernel"
++ },
++ {
++ "container_format": "ari",
++ "disk_format": "ari",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.initramfs",
++ "name": "ipa-ramdisk"
++ }
++ ],
++ "baremetal_ironic_nodes": [
++ {
++ "driver": "pxe_ssh",
++ "driver_info": {
++ "power": {
++ "ssh_address": "192.168.122.1",
++ "ssh_key_contents": "{{ lookup('file', '/tmp/id_rsa') }}",
++ "ssh_username": "root",
++ "ssh_virt_type": "virsh"
++ }
++ },
++ "name": "controller-2",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "00:0d:4f:4a:d6:7e"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384,
++ "root_device": {
++ "name": "/dev/vda"
++ }
++ }
++ },
++ {
++ "driver": "pxe_ssh",
++ "driver_info": {
++ "power": {
++ "ssh_address": "192.168.122.1",
++ "ssh_key_contents": "{{ lookup('file', '/tmp/id_rsa') }}",
++ "ssh_username": "root",
++ "ssh_virt_type": "virsh"
++ }
++ },
++ "name": "controller-3",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "00:0d:5f:49:d6:7e"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384,
++ "root_device": {
++ "name": "/dev/vda"
++ }
++ }
++ },
++ {
++ "driver": "pxe_ssh",
++ "driver_info": {
++ "power": {
++ "ssh_address": "192.168.122.1",
++ "ssh_key_contents": "{{ lookup('file', '/tmp/id_rsa') }}",
++ "ssh_username": "root",
++ "ssh_virt_type": "virsh"
++ }
++ },
++ "name": "compute-1",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "00:0d:4f:49:d6:7e"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384,
++ "root_device": {
++ "name": "/dev/vda"
++ }
++ }
++ }
++ ],
++ "baremetal_networks": [
++ {
++ "allocation_pool_end": "172.29.236.50",
++ "allocation_pool_start": "172.29.236.12",
++ "cidr": "172.29.236.0/24",
++ "net_name": "provisioning_net",
++ "provider_network_type": "flat",
++ "provider_physical_network": "flat",
++ "subnet_name": "provisioning_subnet"
++ }
++ ],
++ "baremetal_nova_nodes": [
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.14"
++ }
++ ],
++ "node_name": "controller-2",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ },
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.15"
++ }
++ ],
++ "node_name": "controller-3",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ },
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.16"
++ }
++ ],
++ "node_name": "compute-1",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ }
++ ],
++ "container_cidr": "172.29.236.0/24",
++ "external_lb_vip_address": "192.168.122.31",
++ "internal_lb_vip_address": "172.29.236.11",
++ "management_bridge": "eth0",
++ "ntp_servers": [
++ "10.104.45.1"
++ ],
++ "provider_networks": [
++ {
++ "network": {
++ "container_bridge": "eth0",
++ "container_interface": "eth0",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent",
++ "all_containers",
++ "hosts"
++ ],
++ "host_bind_override": "eth0",
++ "is_container_address": true,
++ "is_ssh_address": true,
++ "net_name": "flat",
++ "type": "flat"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan20",
++ "container_interface": "vlan20",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent"
++ ],
++ "ip_from_q": "tunnel",
++ "net_name": "vxlan",
++ "range": "1:1000",
++ "type": "vxlan"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan10",
++ "container_interface": "vlan10",
++ "container_type": "veth",
++ "group_binds": [
++ "glance_api",
++ "cinder_api",
++ "cinder_volume",
++ "nova_compute",
++ "swift_proxy"
++ ],
++ "ip_from_q": "storage",
++ "type": "raw"
++ }
++ }
++ ],
++ "swift": {
++ "drives": [
++ {
++ "name": "swift1.img"
++ },
++ {
++ "name": "swift2.img"
++ },
++ {
++ "name": "swift3.img"
++ }
++ ],
++ "mount_point": "/srv",
++ "part_power": 8,
++ "replication_network": "vlan10",
++ "storage_network": "vlan10",
++ "storage_policies": [
++ {
++ "policy": {
++ "default": true,
++ "index": 0,
++ "name": "default"
++ }
++ }
++ ]
++ },
++ "tunnel_bridge": "vlan20"
++ }
++ },
++ "all_containers": {
++ "children": [
++ "unbound_containers",
++ "ceph-osd_containers",
++ "orchestration_containers",
++ "operator_containers",
++ "memcaching_containers",
++ "metering-infra_containers",
++ "trove-infra_containers",
++ "ironic-infra_containers",
++ "ceph-mon_containers",
++ "swift_containers",
++ "storage_containers",
++ "ironic-server_containers",
++ "mq_containers",
++ "shared-infra_containers",
++ "compute_containers",
++ "storage-infra_containers",
++ "swift-proxy_containers",
++ "haproxy_containers",
++ "key-manager_containers",
++ "metering-alarm_containers",
++ "magnum-infra_containers",
++ "network_containers",
++ "sahara-infra_containers",
++ "os-infra_containers",
++ "image_containers",
++ "compute-infra_containers",
++ "log_containers",
++ "ironic-compute_containers",
++ "metering-compute_containers",
++ "identity_containers",
++ "dashboard_containers",
++ "dnsaas_containers",
++ "database_containers",
++ "metrics_containers",
++ "repo-infra_containers"
++ ],
++ "hosts": []
++ },
++ "aodh_alarm_evaluator": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_alarm_notifier": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_all": {
++ "children": [
++ "aodh_alarm_notifier",
++ "aodh_api",
++ "aodh_alarm_evaluator",
++ "aodh_listener"
++ ],
++ "hosts": []
++ },
++ "aodh_api": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_container": {
++ "hosts": []
++ },
++ "aodh_listener": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_all": {
++ "children": [
++ "barbican_api"
++ ],
++ "hosts": []
++ },
++ "barbican_api": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_container": {
++ "hosts": []
++ },
++ "baremetal-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ceilometer_agent_central": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_compute": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_notification": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_all": {
++ "children": [
++ "ceilometer_agent_central",
++ "ceilometer_agent_notification",
++ "ceilometer_api",
++ "ceilometer_collector",
++ "ceilometer_agent_compute"
++ ],
++ "hosts": []
++ },
++ "ceilometer_api": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_api_container": {
++ "hosts": []
++ },
++ "ceilometer_collector": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_collector_container": {
++ "hosts": []
++ },
++ "ceph-mon": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_container": {
++ "hosts": []
++ },
++ "ceph-mon_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_container": {
++ "hosts": []
++ },
++ "ceph-osd_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph_all": {
++ "children": [
++ "ceph-mon",
++ "ceph-osd"
++ ],
++ "hosts": []
++ },
++ "cinder_all": {
++ "children": [
++ "cinder_api",
++ "cinder_backup",
++ "cinder_volume",
++ "cinder_scheduler"
++ ],
++ "hosts": []
++ },
++ "cinder_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_api_container": {
++ "hosts": []
++ },
++ "cinder_backup": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler_container": {
++ "hosts": []
++ },
++ "cinder_volume": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_volumes_container": {
++ "hosts": []
++ },
++ "compute-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "compute-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "compute-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "compute_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "compute_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "dashboard_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "dashboard_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "dashboard_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "database_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "database_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_all": {
++ "children": [
++ "designate_producer",
++ "designate_mdns",
++ "designate_api",
++ "designate_worker",
++ "designate_central",
++ "designate_sink"
++ ],
++ "hosts": []
++ },
++ "designate_api": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_central": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_container": {
++ "hosts": []
++ },
++ "designate_mdns": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_producer": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_sink": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_worker": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "galera": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "galera_all": {
++ "children": [
++ "galera"
++ ],
++ "hosts": []
++ },
++ "galera_container": {
++ "hosts": []
++ },
++ "glance_all": {
++ "children": [
++ "glance_registry",
++ "glance_api"
++ ],
++ "hosts": []
++ },
++ "glance_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "glance_container": {
++ "hosts": []
++ },
++ "glance_registry": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "gnocchi_all": {
++ "children": [
++ "gnocchi_api",
++ "gnocchi_metricd"
++ ],
++ "hosts": []
++ },
++ "gnocchi_api": {
++ "children": [],
++ "hosts": []
++ },
++ "gnocchi_container": {
++ "hosts": []
++ },
++ "gnocchi_metricd": {
++ "children": [],
++ "hosts": []
++ },
++ "haproxy": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "haproxy_all": {
++ "children": [
++ "haproxy"
++ ],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "haproxy_container": {
++ "hosts": []
++ },
++ "haproxy_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "haproxy_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "heat_all": {
++ "children": [
++ "heat_api",
++ "heat_engine",
++ "heat_api_cloudwatch",
++ "heat_api_cfn"
++ ],
++ "hosts": []
++ },
++ "heat_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "heat_api_cfn": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "heat_api_cloudwatch": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "heat_apis_container": {
++ "hosts": []
++ },
++ "heat_engine": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "heat_engine_container": {
++ "hosts": []
++ },
++ "horizon": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "horizon_all": {
++ "children": [
++ "horizon"
++ ],
++ "hosts": []
++ },
++ "horizon_container": {
++ "hosts": []
++ },
++ "hosts": {
++ "children": [
++ "memcaching_hosts",
++ "metering-compute_hosts",
++ "image_hosts",
++ "shared-infra_hosts",
++ "storage_hosts",
++ "metering-infra_hosts",
++ "os-infra_hosts",
++ "ironic-server_hosts",
++ "key-manager_hosts",
++ "ceph-osd_hosts",
++ "dnsaas_hosts",
++ "network_hosts",
++ "haproxy_hosts",
++ "mq_hosts",
++ "database_hosts",
++ "swift-proxy_hosts",
++ "trove-infra_hosts",
++ "ironic-compute_hosts",
++ "metering-alarm_hosts",
++ "log_hosts",
++ "ceph-mon_hosts",
++ "compute_hosts",
++ "orchestration_hosts",
++ "compute-infra_hosts",
++ "identity_hosts",
++ "unbound_hosts",
++ "swift_hosts",
++ "sahara-infra_hosts",
++ "magnum-infra_hosts",
++ "ironic-infra_hosts",
++ "metrics_hosts",
++ "dashboard_hosts",
++ "storage-infra_hosts",
++ "operator_hosts",
++ "repo-infra_hosts"
++ ],
++ "hosts": []
++ },
++ "identity_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "identity_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "identity_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "image_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "image_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "image_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-compute_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-compute_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-compute_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-server_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic-server_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_all": {
++ "children": [
++ "ironic_conductor",
++ "ironic_api"
++ ],
++ "hosts": []
++ },
++ "ironic_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic_api_container": {
++ "hosts": []
++ },
++ "ironic_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic_compute_container": {
++ "hosts": []
++ },
++ "ironic_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic_conductor_container": {
++ "hosts": []
++ },
++ "ironic_server": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_server_container": {
++ "hosts": []
++ },
++ "ironic_servers": {
++ "children": [
++ "ironic_server"
++ ],
++ "hosts": []
++ },
++ "key-manager_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "key-manager_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "keystone": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "keystone_all": {
++ "children": [
++ "keystone"
++ ],
++ "hosts": []
++ },
++ "keystone_container": {
++ "hosts": []
++ },
++ "log_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "log_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "log_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "lxc_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "magnum": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum_all": {
++ "children": [
++ "magnum"
++ ],
++ "hosts": []
++ },
++ "magnum_container": {
++ "hosts": []
++ },
++ "memcached": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "memcached_all": {
++ "children": [
++ "memcached"
++ ],
++ "hosts": []
++ },
++ "memcached_container": {
++ "hosts": []
++ },
++ "memcaching_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "memcaching_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_container": {
++ "hosts": []
++ },
++ "metering-compute_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "network_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "network_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "network_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_agents_container": {
++ "hosts": []
++ },
++ "neutron_all": {
++ "children": [
++ "neutron_agent",
++ "neutron_metadata_agent",
++ "neutron_linuxbridge_agent",
++ "neutron_bgp_dragent",
++ "neutron_dhcp_agent",
++ "neutron_lbaas_agent",
++ "neutron_l3_agent",
++ "neutron_metering_agent",
++ "neutron_server",
++ "neutron_sriov_nic_agent",
++ "neutron_openvswitch_agent"
++ ],
++ "hosts": []
++ },
++ "neutron_bgp_dragent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_dhcp_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_l3_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_lbaas_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_linuxbridge_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_metadata_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_metering_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_openvswitch_agent": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_server": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "neutron_server_container": {
++ "hosts": []
++ },
++ "neutron_sriov_nic_agent": {
++ "children": [],
++ "hosts": []
++ },
++ "nova_all": {
++ "children": [
++ "nova_console",
++ "nova_scheduler",
++ "ironic_compute",
++ "nova_api_placement",
++ "nova_api_metadata",
++ "nova_api_os_compute",
++ "nova_conductor",
++ "nova_compute"
++ ],
++ "hosts": []
++ },
++ "nova_api_metadata": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_api_metadata_container": {
++ "hosts": []
++ },
++ "nova_api_os_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_api_os_compute_container": {
++ "hosts": []
++ },
++ "nova_api_placement": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_api_placement_container": {
++ "hosts": []
++ },
++ "nova_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_compute_container": {
++ "hosts": []
++ },
++ "nova_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_conductor_container": {
++ "hosts": []
++ },
++ "nova_console": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_console_container": {
++ "hosts": []
++ },
++ "nova_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "nova_scheduler_container": {
++ "hosts": []
++ },
++ "operator_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "operator_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "orchestration_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "orchestration_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "orchestration_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "os-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "os-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "pkg_repo": {
++ "children": [],
++ "hosts": []
++ },
++ "rabbit_mq_container": {
++ "hosts": []
++ },
++ "rabbitmq": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "rabbitmq_all": {
++ "children": [
++ "rabbitmq"
++ ],
++ "hosts": []
++ },
++ "remote": {
++ "children": [
++ "swift-remote_hosts"
++ ],
++ "hosts": []
++ },
++ "remote_containers": {
++ "children": [
++ "swift-remote_containers"
++ ],
++ "hosts": []
++ },
++ "repo-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "repo-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "repo_all": {
++ "children": [
++ "pkg_repo"
++ ],
++ "hosts": []
++ },
++ "repo_container": {
++ "hosts": []
++ },
++ "rsyslog": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "rsyslog_all": {
++ "children": [
++ "rsyslog"
++ ],
++ "hosts": []
++ },
++ "rsyslog_container": {
++ "hosts": []
++ },
++ "sahara-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_all": {
++ "children": [
++ "sahara_api",
++ "sahara_engine"
++ ],
++ "hosts": []
++ },
++ "sahara_api": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_container": {
++ "hosts": []
++ },
++ "sahara_engine": {
++ "children": [],
++ "hosts": []
++ },
++ "shared-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "shared-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "shared-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift-proxy_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-remote_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "swift-remote_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_acc": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_acc_container": {
++ "hosts": []
++ },
++ "swift_all": {
++ "children": [
++ "swift_cont",
++ "swift_obj",
++ "swift_proxy",
++ "swift_acc"
++ ],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont_container": {
++ "hosts": []
++ },
++ "swift_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj_container": {
++ "hosts": []
++ },
++ "swift_proxy": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_proxy_container": {
++ "hosts": []
++ },
++ "swift_remote": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_remote_all": {
++ "children": [
++ "swift_remote"
++ ],
++ "hosts": []
++ },
++ "swift_remote_container": {
++ "hosts": []
++ },
++ "trove-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "trove-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_all": {
++ "children": [
++ "trove_conductor",
++ "trove_taskmanager",
++ "trove_api"
++ ],
++ "hosts": []
++ },
++ "trove_api": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_api_container": {
++ "hosts": []
++ },
++ "trove_conductor": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_conductor_container": {
++ "hosts": []
++ },
++ "trove_taskmanager": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_taskmanager_container": {
++ "hosts": []
++ },
++ "unbound": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_all": {
++ "children": [
++ "unbound"
++ ],
++ "hosts": []
++ },
++ "unbound_container": {
++ "hosts": []
++ },
++ "unbound_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "utility": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "utility_all": {
++ "children": [
++ "utility"
++ ],
++ "hosts": []
++ },
++ "utility_container": {
++ "hosts": []
++ }
++}
+\ No newline at end of file
+diff --git a/tests/openstack_inventory_prod.json b/tests/openstack_inventory_prod.json
+new file mode 100644
+index 00000000..5ab7555c
+--- /dev/null
++++ b/tests/openstack_inventory_prod.json
+@@ -0,0 +1,1777 @@
++{
++ "_meta": {
++ "hostvars": {
++ "compute-1": {
++ "ansible_host": "172.29.236.16",
++ "component": "nova_compute",
++ "container_address": "172.29.236.16",
++ "container_name": "compute-1",
++ "container_networks": {
++ "eth0_address": {
++ "address": "172.29.236.16",
++ "bridge": "eth0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan10",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan20",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "compute-1-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.16/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11"
++ ],
++ "name": "eth0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.16/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 10
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.16/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 20
++ }
++ ]
++ },
++ "physical_host": "compute-1",
++ "physical_host_group": "compute_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "nova"
++ }
++ },
++ "controller-1": {
++ "ansible_host": "172.29.236.11",
++ "cinder_backends": {
++ "lvm": {
++ "iscsi_ip_address": "172.29.236.11",
++ "volume_backend_name": "LVM_iSCSI",
++ "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
++ "volume_group": "cinder-volumes"
++ }
++ },
++ "component": "heat_engine",
++ "container_address": "172.29.236.11",
++ "container_name": "controller-1",
++ "container_networks": {
++ "eth0_address": {
++ "address": "172.29.236.11",
++ "bridge": "eth0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan10",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan20",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-1-host_containers",
++ "is_metal": true,
++ "physical_host": "controller-1",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ },
++ "swift_proxy_vars": {
++ "read_affinity": "r1=100",
++ "write_affinity": "r1",
++ "write_affinity_node_count": "1 * replicas"
++ },
++ "swift_vars": {
++ "region": 1,
++ "zone": 0
++ }
++ },
++ "controller-2": {
++ "ansible_host": "172.29.236.14",
++ "component": "heat_engine",
++ "container_address": "172.29.236.14",
++ "container_name": "controller-2",
++ "container_networks": {
++ "eth0_address": {
++ "address": "172.29.236.14",
++ "bridge": "eth0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan10",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan20",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-2-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.14/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11",
++ "10.102.12.68"
++ ],
++ "name": "eth0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.14/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 10
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.14/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 20
++ },
++ {
++ "name": "eth1",
++ "type": "interface",
++ "use_dhcp": true
++ }
++ ]
++ },
++ "physical_host": "controller-2",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ }
++ },
++ "controller-3": {
++ "ansible_host": "172.29.236.15",
++ "component": "heat_engine",
++ "container_address": "172.29.236.15",
++ "container_name": "controller-3",
++ "container_networks": {
++ "eth0_address": {
++ "address": "172.29.236.15",
++ "bridge": "eth0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan10",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan20",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-3-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.15/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11",
++ "10.102.12.68"
++ ],
++ "name": "eth0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.15/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 10
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.15/24"
++ }
++ ],
++ "device": "eth0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 20
++ },
++ {
++ "name": "eth1",
++ "type": "interface",
++ "use_dhcp": true
++ }
++ ]
++ },
++ "physical_host": "controller-3",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ }
++ }
++ }
++ },
++ "all": {
++ "vars": {
++ "baremetal_flavor": [
++ {
++ "disk": 10,
++ "extra_specs": {
++ "capabilities:boot_option": "local",
++ "cpu_arch": "x86_64"
++ },
++ "name": "baremetal",
++ "ram": 13999,
++ "vcpus": 8
++ }
++ ],
++ "baremetal_images": [
++ {
++ "container_format": "bare",
++ "disk_format": "raw",
++ "file": "/opt/ncio/overcloudimages/guest-image.img",
++ "name": "golden"
++ },
++ {
++ "container_format": "aki",
++ "disk_format": "aki",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.kernel",
++ "name": "ipa-kernel"
++ },
++ {
++ "container_format": "ari",
++ "disk_format": "ari",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.initramfs",
++ "name": "ipa-ramdisk"
++ }
++ ],
++ "baremetal_networks": [
++ {
++ "allocation_pool_end": "172.29.236.50",
++ "allocation_pool_start": "172.29.236.12",
++ "cidr": "172.29.236.0/24",
++ "net_name": "provisioning_net",
++ "provider_network_type": "flat",
++ "provider_physical_network": "flat",
++ "subnet_name": "provisioning_subnet"
++ }
++ ],
++ "container_cidr": "172.29.236.0/24",
++ "external_lb_vip_address": "192.168.122.10",
++ "galera_initial_setup": false,
++ "internal_lb_vip_address": "172.29.236.110",
++ "management_bridge": "eth0",
++ "ntp_servers": [
++ "10.104.45.1"
++ ],
++ "provider_networks": [
++ {
++ "network": {
++ "container_bridge": "eth0",
++ "container_interface": "eth0",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent",
++ "all_containers",
++ "hosts"
++ ],
++ "host_bind_override": "eth0",
++ "is_container_address": true,
++ "is_ssh_address": true,
++ "net_name": "flat",
++ "type": "flat"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan20",
++ "container_interface": "vlan20",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent",
++ "neutron-openvswitch-agent"
++ ],
++ "ip_from_q": "tunnel",
++ "net_name": "vxlan",
++ "range": "1:1000",
++ "type": "vxlan"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan10",
++ "container_interface": "vlan10",
++ "container_type": "veth",
++ "group_binds": [
++ "glance_api",
++ "cinder_api",
++ "cinder_volume",
++ "nova_compute"
++ ],
++ "ip_from_q": "storage",
++ "type": "raw"
++ }
++ }
++ ],
++ "swift": {
++ "drives": [
++ {
++ "name": "swift1.img"
++ },
++ {
++ "name": "swift2.img"
++ },
++ {
++ "name": "swift3.img"
++ }
++ ],
++ "mount_point": "/srv",
++ "part_power": 8,
++ "replication_network": "vlan10",
++ "storage_network": "vlan10",
++ "storage_policies": [
++ {
++ "policy": {
++ "default": true,
++ "index": 0,
++ "name": "default"
++ }
++ }
++ ]
++ },
++ "tunnel_bridge": "vlan20"
++ }
++ },
++ "all_containers": {
++ "children": [
++ "unbound_containers",
++ "ceph-osd_containers",
++ "orchestration_containers",
++ "operator_containers",
++ "memcaching_containers",
++ "metering-infra_containers",
++ "trove-infra_containers",
++ "ironic-infra_containers",
++ "ceph-mon_containers",
++ "swift_containers",
++ "storage_containers",
++ "ironic-server_containers",
++ "mq_containers",
++ "shared-infra_containers",
++ "compute_containers",
++ "storage-infra_containers",
++ "swift-proxy_containers",
++ "haproxy_containers",
++ "key-manager_containers",
++ "metering-alarm_containers",
++ "magnum-infra_containers",
++ "network_containers",
++ "sahara-infra_containers",
++ "os-infra_containers",
++ "image_containers",
++ "compute-infra_containers",
++ "log_containers",
++ "ironic-compute_containers",
++ "metering-compute_containers",
++ "identity_containers",
++ "dashboard_containers",
++ "dnsaas_containers",
++ "database_containers",
++ "metrics_containers",
++ "repo-infra_containers"
++ ],
++ "hosts": []
++ },
++ "aodh_alarm_evaluator": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_alarm_notifier": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_all": {
++ "children": [
++ "aodh_alarm_notifier",
++ "aodh_api",
++ "aodh_alarm_evaluator",
++ "aodh_listener"
++ ],
++ "hosts": []
++ },
++ "aodh_api": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_container": {
++ "hosts": []
++ },
++ "aodh_listener": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_all": {
++ "children": [
++ "barbican_api"
++ ],
++ "hosts": []
++ },
++ "barbican_api": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_container": {
++ "hosts": []
++ },
++ "baremetal-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "baremetal-interface_config_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "compute-1"
++ ]
++ },
++ "ceilometer_agent_central": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_compute": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_notification": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_all": {
++ "children": [
++ "ceilometer_agent_central",
++ "ceilometer_agent_notification",
++ "ceilometer_api",
++ "ceilometer_collector",
++ "ceilometer_agent_compute"
++ ],
++ "hosts": []
++ },
++ "ceilometer_api": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_api_container": {
++ "hosts": []
++ },
++ "ceilometer_collector": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_collector_container": {
++ "hosts": []
++ },
++ "ceph-mon": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_container": {
++ "hosts": []
++ },
++ "ceph-mon_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_container": {
++ "hosts": []
++ },
++ "ceph-osd_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph_all": {
++ "children": [
++ "ceph-mon",
++ "ceph-osd"
++ ],
++ "hosts": []
++ },
++ "cinder_all": {
++ "children": [
++ "cinder_api",
++ "cinder_backup",
++ "cinder_volume",
++ "cinder_scheduler"
++ ],
++ "hosts": []
++ },
++ "cinder_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_api_container": {
++ "hosts": []
++ },
++ "cinder_backup": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler_container": {
++ "hosts": []
++ },
++ "cinder_volume": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_volumes_container": {
++ "hosts": []
++ },
++ "compute-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "compute-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "compute-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "compute_all": {
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "compute_containers": {
++ "children": [
++ "compute-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "compute_hosts": {
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "dashboard_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "dashboard_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "dashboard_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "database_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "database_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_all": {
++ "children": [
++ "designate_producer",
++ "designate_mdns",
++ "designate_api",
++ "designate_worker",
++ "designate_central",
++ "designate_sink"
++ ],
++ "hosts": []
++ },
++ "designate_api": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_central": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_container": {
++ "hosts": []
++ },
++ "designate_mdns": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_producer": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_sink": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_worker": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "galera": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "galera_all": {
++ "children": [
++ "galera"
++ ],
++ "hosts": []
++ },
++ "galera_container": {
++ "hosts": []
++ },
++ "glance_all": {
++ "children": [
++ "glance_registry",
++ "glance_api"
++ ],
++ "hosts": []
++ },
++ "glance_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "glance_container": {
++ "hosts": []
++ },
++ "glance_registry": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "gnocchi_all": {
++ "children": [
++ "gnocchi_api",
++ "gnocchi_metricd"
++ ],
++ "hosts": []
++ },
++ "gnocchi_api": {
++ "children": [],
++ "hosts": []
++ },
++ "gnocchi_container": {
++ "hosts": []
++ },
++ "gnocchi_metricd": {
++ "children": [],
++ "hosts": []
++ },
++ "haproxy": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "haproxy_all": {
++ "children": [
++ "haproxy"
++ ],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "haproxy_container": {
++ "hosts": []
++ },
++ "haproxy_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "haproxy_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_all": {
++ "children": [
++ "heat_api",
++ "heat_engine",
++ "heat_api_cloudwatch",
++ "heat_api_cfn"
++ ],
++ "hosts": []
++ },
++ "heat_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_api_cfn": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_api_cloudwatch": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_apis_container": {
++ "hosts": []
++ },
++ "heat_engine": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_engine_container": {
++ "hosts": []
++ },
++ "horizon": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "horizon_all": {
++ "children": [
++ "horizon"
++ ],
++ "hosts": []
++ },
++ "horizon_container": {
++ "hosts": []
++ },
++ "hosts": {
++ "children": [
++ "memcaching_hosts",
++ "metering-compute_hosts",
++ "image_hosts",
++ "shared-infra_hosts",
++ "storage_hosts",
++ "metering-infra_hosts",
++ "os-infra_hosts",
++ "ironic-server_hosts",
++ "key-manager_hosts",
++ "ceph-osd_hosts",
++ "dnsaas_hosts",
++ "network_hosts",
++ "haproxy_hosts",
++ "mq_hosts",
++ "database_hosts",
++ "swift-proxy_hosts",
++ "trove-infra_hosts",
++ "ironic-compute_hosts",
++ "metering-alarm_hosts",
++ "log_hosts",
++ "ceph-mon_hosts",
++ "compute_hosts",
++ "orchestration_hosts",
++ "compute-infra_hosts",
++ "identity_hosts",
++ "unbound_hosts",
++ "swift_hosts",
++ "sahara-infra_hosts",
++ "magnum-infra_hosts",
++ "ironic-infra_hosts",
++ "metrics_hosts",
++ "dashboard_hosts",
++ "storage-infra_hosts",
++ "operator_hosts",
++ "repo-infra_hosts"
++ ],
++ "hosts": []
++ },
++ "identity_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "identity_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "identity_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "image_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "image_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "image_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-compute_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-compute_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-compute_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-server_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic-server_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_all": {
++ "children": [
++ "ironic_conductor",
++ "ironic_api"
++ ],
++ "hosts": []
++ },
++ "ironic_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic_api_container": {
++ "hosts": []
++ },
++ "ironic_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic_compute_container": {
++ "hosts": []
++ },
++ "ironic_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic_conductor_container": {
++ "hosts": []
++ },
++ "ironic_server": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_server_container": {
++ "hosts": []
++ },
++ "ironic_servers": {
++ "children": [
++ "ironic_server"
++ ],
++ "hosts": []
++ },
++ "key-manager_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "key-manager_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "keystone": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "keystone_all": {
++ "children": [
++ "keystone"
++ ],
++ "hosts": []
++ },
++ "keystone_container": {
++ "hosts": []
++ },
++ "log_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "log_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "lxc_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "magnum": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum_all": {
++ "children": [
++ "magnum"
++ ],
++ "hosts": []
++ },
++ "magnum_container": {
++ "hosts": []
++ },
++ "memcached": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "memcached_all": {
++ "children": [
++ "memcached"
++ ],
++ "hosts": []
++ },
++ "memcached_container": {
++ "hosts": []
++ },
++ "memcaching_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "memcaching_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_container": {
++ "hosts": []
++ },
++ "metering-compute_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "network_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "network_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "network_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_agents_container": {
++ "hosts": []
++ },
++ "neutron_all": {
++ "children": [
++ "neutron_agent",
++ "neutron_metadata_agent",
++ "neutron_linuxbridge_agent",
++ "neutron_bgp_dragent",
++ "neutron_dhcp_agent",
++ "neutron_lbaas_agent",
++ "neutron_l3_agent",
++ "neutron_metering_agent",
++ "neutron_server",
++ "neutron_sriov_nic_agent",
++ "neutron_openvswitch_agent"
++ ],
++ "hosts": []
++ },
++ "neutron_bgp_dragent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_dhcp_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_l3_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_lbaas_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_linuxbridge_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "neutron_metadata_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_metering_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_openvswitch_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "neutron_server": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_server_container": {
++ "hosts": []
++ },
++ "neutron_sriov_nic_agent": {
++ "children": [],
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "nova_all": {
++ "children": [
++ "nova_console",
++ "nova_scheduler",
++ "ironic_compute",
++ "nova_api_placement",
++ "nova_api_metadata",
++ "nova_api_os_compute",
++ "nova_conductor",
++ "nova_compute"
++ ],
++ "hosts": []
++ },
++ "nova_api_metadata": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_metadata_container": {
++ "hosts": []
++ },
++ "nova_api_os_compute": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_os_compute_container": {
++ "hosts": []
++ },
++ "nova_api_placement": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_placement_container": {
++ "hosts": []
++ },
++ "nova_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "nova_compute_container": {
++ "hosts": []
++ },
++ "nova_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_conductor_container": {
++ "hosts": []
++ },
++ "nova_console": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_console_container": {
++ "hosts": []
++ },
++ "nova_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_scheduler_container": {
++ "hosts": []
++ },
++ "operator_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "operator_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "orchestration_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "orchestration_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "orchestration_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "os-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "os-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "pkg_repo": {
++ "children": [],
++ "hosts": []
++ },
++ "rabbit_mq_container": {
++ "hosts": []
++ },
++ "rabbitmq": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "rabbitmq_all": {
++ "children": [
++ "rabbitmq"
++ ],
++ "hosts": []
++ },
++ "remote": {
++ "children": [
++ "swift-remote_hosts"
++ ],
++ "hosts": []
++ },
++ "remote_containers": {
++ "children": [
++ "swift-remote_containers"
++ ],
++ "hosts": []
++ },
++ "repo-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "repo-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "repo_all": {
++ "children": [
++ "pkg_repo"
++ ],
++ "hosts": []
++ },
++ "repo_container": {
++ "hosts": []
++ },
++ "rsyslog": {
++ "children": [],
++ "hosts": []
++ },
++ "rsyslog_all": {
++ "children": [
++ "rsyslog"
++ ],
++ "hosts": []
++ },
++ "rsyslog_container": {
++ "hosts": []
++ },
++ "sahara-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_all": {
++ "children": [
++ "sahara_api",
++ "sahara_engine"
++ ],
++ "hosts": []
++ },
++ "sahara_api": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_container": {
++ "hosts": []
++ },
++ "sahara_engine": {
++ "children": [],
++ "hosts": []
++ },
++ "shared-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "shared-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "shared-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "storage-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift-proxy_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-remote_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "swift-remote_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_acc": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_acc_container": {
++ "hosts": []
++ },
++ "swift_all": {
++ "children": [
++ "swift_cont",
++ "swift_obj",
++ "swift_proxy",
++ "swift_acc"
++ ],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont_container": {
++ "hosts": []
++ },
++ "swift_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj_container": {
++ "hosts": []
++ },
++ "swift_proxy": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_proxy_container": {
++ "hosts": []
++ },
++ "swift_remote": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_remote_all": {
++ "children": [
++ "swift_remote"
++ ],
++ "hosts": []
++ },
++ "swift_remote_container": {
++ "hosts": []
++ },
++ "trove-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "trove-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_all": {
++ "children": [
++ "trove_conductor",
++ "trove_taskmanager",
++ "trove_api"
++ ],
++ "hosts": []
++ },
++ "trove_api": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_api_container": {
++ "hosts": []
++ },
++ "trove_conductor": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_conductor_container": {
++ "hosts": []
++ },
++ "trove_taskmanager": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_taskmanager_container": {
++ "hosts": []
++ },
++ "unbound": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_all": {
++ "children": [
++ "unbound"
++ ],
++ "hosts": []
++ },
++ "unbound_container": {
++ "hosts": []
++ },
++ "unbound_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "utility": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "utility_all": {
++ "children": [
++ "utility"
++ ],
++ "hosts": []
++ },
++ "utility_container": {
++ "hosts": []
++ }
++}
+\ No newline at end of file
+diff --git a/tests/openstack_inventory_real_prod.json b/tests/openstack_inventory_real_prod.json
+new file mode 100644
+index 00000000..329dd6ac
+--- /dev/null
++++ b/tests/openstack_inventory_real_prod.json
+@@ -0,0 +1,1919 @@
++{
++ "_meta": {
++ "hostvars": {
++ "compute-1": {
++ "ansible_host": "172.29.236.16",
++ "component": "nova_compute",
++ "container_address": "172.29.236.16",
++ "container_name": "compute-1",
++ "container_networks": {
++ "ens255f0_address": {
++ "address": "172.29.236.16",
++ "bridge": "ens255f0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan978",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan1002",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "compute-1-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.16/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11"
++ ],
++ "name": "ens255f0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.16/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 978
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.16/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 1002
++ }
++ ]
++ },
++ "physical_host": "compute-1",
++ "physical_host_group": "compute_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "nova"
++ }
++ },
++ "controller-1": {
++ "ansible_host": "172.29.236.11",
++ "cinder_backends": {
++ "lvm": {
++ "iscsi_ip_address": "172.29.236.11",
++ "volume_backend_name": "LVM_iSCSI",
++ "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
++ "volume_group": "cinder-volumes"
++ }
++ },
++ "component": "heat_engine",
++ "container_address": "172.29.236.11",
++ "container_name": "controller-1",
++ "container_networks": {
++ "ens255f0_address": {
++ "address": "172.29.236.11",
++ "bridge": "ens255f0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan978",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan1002",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-1-host_containers",
++ "is_metal": true,
++ "physical_host": "controller-1",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ },
++ "swift_proxy_vars": {
++ "read_affinity": "r1=100",
++ "write_affinity": "r1",
++ "write_affinity_node_count": "1 * replicas"
++ },
++ "swift_vars": {
++ "region": 1,
++ "zone": 0
++ }
++ },
++ "controller-2": {
++ "ansible_host": "172.29.236.14",
++ "component": "heat_engine",
++ "container_address": "172.29.236.14",
++ "container_name": "controller-2",
++ "container_networks": {
++ "ens255f0_address": {
++ "address": "172.29.236.14",
++ "bridge": "ens255f0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan978",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan1002",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-2-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.14/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11",
++ "10.39.12.252"
++ ],
++ "name": "ens255f0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.14/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 978
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.14/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 1002
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "10.39.172.11/24"
++ }
++ ],
++ "device": "ens4f0",
++ "dns_servers": [
++ "10.39.12.252"
++ ],
++ "routes": [
++ {
++ "ip_netmask": "0.0.0.0/0",
++ "next_hop": "10.39.172.254"
++ }
++ ],
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 902
++ }
++ ]
++ },
++ "physical_host": "controller-2",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ }
++ },
++ "controller-3": {
++ "ansible_host": "172.29.236.15",
++ "component": "heat_engine",
++ "container_address": "172.29.236.15",
++ "container_name": "controller-3",
++ "container_networks": {
++ "ens255f0_address": {
++ "address": "172.29.236.15",
++ "bridge": "ens255f0",
++ "netmask": null,
++ "type": "veth"
++ },
++ "storage_address": {
++ "bridge": "vlan978",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ },
++ "tunnel_address": {
++ "bridge": "vlan1002",
++ "netmask": "255.255.255.0",
++ "type": "veth"
++ }
++ },
++ "container_types": "controller-3-host_containers",
++ "is_metal": true,
++ "os_net_config": {
++ "network_config": [
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.236.15/24"
++ }
++ ],
++ "dns_servers": [
++ "172.29.236.11",
++ "10.39.12.252"
++ ],
++ "name": "ens255f0",
++ "type": "interface",
++ "use_dhcp": false
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.244.15/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 978
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "172.29.240.15/24"
++ }
++ ],
++ "device": "ens4f0",
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 1002
++ },
++ {
++ "addresses": [
++ {
++ "ip_netmask": "10.39.172.12/24"
++ }
++ ],
++ "device": "ens4f0",
++ "dns_servers": [
++ "10.39.12.252"
++ ],
++ "routes": [
++ {
++ "ip_netmask": "0.0.0.0/0",
++ "next_hop": "10.39.172.254"
++ }
++ ],
++ "type": "vlan",
++ "use_dhcp": false,
++ "vlan_id": 902
++ }
++ ]
++ },
++ "physical_host": "controller-3",
++ "physical_host_group": "orchestration_hosts",
++ "properties": {
++ "is_metal": true,
++ "service_name": "heat"
++ }
++ }
++ }
++ },
++ "all": {
++ "vars": {
++ "baremetal_flavor": [
++ {
++ "disk": 10,
++ "extra_specs": {
++ "capabilities:boot_option": "local",
++ "cpu_arch": "x86_64"
++ },
++ "name": "baremetal",
++ "ram": 13999,
++ "vcpus": 8
++ }
++ ],
++ "baremetal_images": [
++ {
++ "container_format": "bare",
++ "disk_format": "raw",
++ "file": "/opt/ncio/overcloudimages/guest-image.img",
++ "name": "golden"
++ },
++ {
++ "container_format": "aki",
++ "disk_format": "aki",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.kernel",
++ "name": "ipa-kernel"
++ },
++ {
++ "container_format": "ari",
++ "disk_format": "ari",
++ "file": "/opt/ncio/overcloudimages/ironic-python-agent.initramfs",
++ "name": "ipa-ramdisk"
++ }
++ ],
++ "baremetal_ironic_nodes": [
++ {
++ "driver": "pxe_ipmitool",
++ "driver_info": {
++ "power": {
++ "ipmi_address": "10.38.241.133",
++ "ipmi_password": "admin",
++ "ipmi_username": "admin"
++ }
++ },
++ "name": "controller-2",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "54:AB:3A:14:11:5A"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384
++ }
++ },
++ {
++ "driver": "pxe_ipmitool",
++ "driver_info": {
++ "power": {
++ "ipmi_address": "10.38.241.132",
++ "ipmi_password": "admin",
++ "ipmi_username": "admin"
++ }
++ },
++ "name": "controller-3",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "54:AB:3A:14:0D:2E"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384
++ }
++ },
++ {
++ "driver": "pxe_ipmitool",
++ "driver_info": {
++ "power": {
++ "ipmi_address": "10.38.241.131",
++ "ipmi_password": "admin",
++ "ipmi_username": "admin"
++ }
++ },
++ "name": "compute-1",
++ "network_interface": "flat",
++ "nics": [
++ {
++ "mac": "54:AB:3A:14:10:D6"
++ }
++ ],
++ "properties": {
++ "capabilities": "boot_option:local",
++ "cpu_arch": "x86_64",
++ "cpus": 8,
++ "disk_size": 40,
++ "ram": 16384
++ }
++ }
++ ],
++ "baremetal_networks": [
++ {
++ "allocation_pool_end": "172.29.236.50",
++ "allocation_pool_start": "172.29.236.14",
++ "cidr": "172.29.236.0/24",
++ "net_name": "provisioning_net",
++ "provider_network_type": "flat",
++ "provider_physical_network": "flat",
++ "subnet_name": "provisioning_subnet"
++ }
++ ],
++ "baremetal_nova_nodes": [
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.14"
++ }
++ ],
++ "node_name": "controller-2",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ },
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.15"
++ }
++ ],
++ "node_name": "controller-3",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ },
++ {
++ "flavor_name": "baremetal",
++ "image_name": "golden",
++ "networks_list": [
++ {
++ "net-name": "provisioning_net",
++ "v4-fixed-ip": "172.29.236.16"
++ }
++ ],
++ "node_name": "compute-1",
++ "userdata": "#cloud-config\nusers:\n - name: ncioadmin\n sudo: ['ALL=(ALL) NOPASSWD:ALL']\n ssh-authorized-keys:\n - \"{{ lookup('file', ansible_env.PWD + '/.ssh/id_rsa.pub') }}\"\n"
++ }
++ ],
++ "container_cidr": "172.29.236.0/24",
++ "external_lb_vip_address": "10.39.172.15",
++ "galera_initial_setup": false,
++ "internal_lb_vip_address": "172.29.236.110",
++ "management_bridge": "ens255f0",
++ "ntp_servers": [
++ "10.39.172.254"
++ ],
++ "provider_networks": [
++ {
++ "network": {
++ "container_bridge": "ens255f0",
++ "container_interface": "ens255f0",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent",
++ "all_containers",
++ "hosts"
++ ],
++ "host_bind_override": "ens255f0",
++ "is_container_address": true,
++ "is_ssh_address": true,
++ "net_name": "flat",
++ "type": "flat"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan1002",
++ "container_interface": "vlan1002",
++ "container_type": "veth",
++ "group_binds": [
++ "neutron_linuxbridge_agent",
++ "neutron-openvswitch-agent"
++ ],
++ "ip_from_q": "tunnel",
++ "net_name": "vxlan",
++ "range": "1:1000",
++ "type": "vxlan"
++ }
++ },
++ {
++ "network": {
++ "container_bridge": "vlan978",
++ "container_interface": "vlan978",
++ "container_type": "veth",
++ "group_binds": [
++ "glance_api",
++ "cinder_api",
++ "cinder_volume",
++ "nova_compute"
++ ],
++ "ip_from_q": "storage",
++ "type": "raw"
++ }
++ }
++ ],
++ "swift": {
++ "drives": [
++ {
++ "name": "swift1.img"
++ },
++ {
++ "name": "swift2.img"
++ },
++ {
++ "name": "swift3.img"
++ }
++ ],
++ "mount_point": "/srv",
++ "part_power": 8,
++ "replication_network": "vlan978",
++ "storage_network": "vlan978",
++ "storage_policies": [
++ {
++ "policy": {
++ "default": true,
++ "index": 0,
++ "name": "default"
++ }
++ }
++ ]
++ },
++ "tunnel_bridge": "vlan1002"
++ }
++ },
++ "all_containers": {
++ "children": [
++ "unbound_containers",
++ "ceph-osd_containers",
++ "orchestration_containers",
++ "operator_containers",
++ "memcaching_containers",
++ "metering-infra_containers",
++ "trove-infra_containers",
++ "ironic-infra_containers",
++ "ceph-mon_containers",
++ "swift_containers",
++ "storage_containers",
++ "ironic-server_containers",
++ "mq_containers",
++ "shared-infra_containers",
++ "compute_containers",
++ "storage-infra_containers",
++ "swift-proxy_containers",
++ "haproxy_containers",
++ "key-manager_containers",
++ "metering-alarm_containers",
++ "magnum-infra_containers",
++ "network_containers",
++ "sahara-infra_containers",
++ "os-infra_containers",
++ "image_containers",
++ "compute-infra_containers",
++ "log_containers",
++ "ironic-compute_containers",
++ "metering-compute_containers",
++ "identity_containers",
++ "dashboard_containers",
++ "dnsaas_containers",
++ "database_containers",
++ "metrics_containers",
++ "repo-infra_containers"
++ ],
++ "hosts": []
++ },
++ "aodh_alarm_evaluator": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_alarm_notifier": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_all": {
++ "children": [
++ "aodh_alarm_notifier",
++ "aodh_api",
++ "aodh_alarm_evaluator",
++ "aodh_listener"
++ ],
++ "hosts": []
++ },
++ "aodh_api": {
++ "children": [],
++ "hosts": []
++ },
++ "aodh_container": {
++ "hosts": []
++ },
++ "aodh_listener": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_all": {
++ "children": [
++ "barbican_api"
++ ],
++ "hosts": []
++ },
++ "barbican_api": {
++ "children": [],
++ "hosts": []
++ },
++ "barbican_container": {
++ "hosts": []
++ },
++ "baremetal-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "baremetal-interface_config_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "compute-1"
++ ]
++ },
++ "ceilometer_agent_central": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_compute": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_agent_notification": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_all": {
++ "children": [
++ "ceilometer_agent_central",
++ "ceilometer_agent_notification",
++ "ceilometer_api",
++ "ceilometer_collector",
++ "ceilometer_agent_compute"
++ ],
++ "hosts": []
++ },
++ "ceilometer_api": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_api_container": {
++ "hosts": []
++ },
++ "ceilometer_collector": {
++ "children": [],
++ "hosts": []
++ },
++ "ceilometer_collector_container": {
++ "hosts": []
++ },
++ "ceph-mon": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_container": {
++ "hosts": []
++ },
++ "ceph-mon_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-mon_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_container": {
++ "hosts": []
++ },
++ "ceph-osd_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph-osd_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ceph_all": {
++ "children": [
++ "ceph-mon",
++ "ceph-osd"
++ ],
++ "hosts": []
++ },
++ "cinder_all": {
++ "children": [
++ "cinder_api",
++ "cinder_backup",
++ "cinder_volume",
++ "cinder_scheduler"
++ ],
++ "hosts": []
++ },
++ "cinder_api": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_api_container": {
++ "hosts": []
++ },
++ "cinder_backup": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_scheduler_container": {
++ "hosts": []
++ },
++ "cinder_volume": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "cinder_volumes_container": {
++ "hosts": []
++ },
++ "compute-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "compute-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "compute-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "compute_all": {
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "compute_containers": {
++ "children": [
++ "compute-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "compute_hosts": {
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "dashboard_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "dashboard_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "dashboard_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "database_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "database_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_all": {
++ "children": [
++ "designate_producer",
++ "designate_mdns",
++ "designate_api",
++ "designate_worker",
++ "designate_central",
++ "designate_sink"
++ ],
++ "hosts": []
++ },
++ "designate_api": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_central": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_container": {
++ "hosts": []
++ },
++ "designate_mdns": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_producer": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_sink": {
++ "children": [],
++ "hosts": []
++ },
++ "designate_worker": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "dnsaas_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "galera": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "galera_all": {
++ "children": [
++ "galera"
++ ],
++ "hosts": []
++ },
++ "galera_container": {
++ "hosts": []
++ },
++ "glance_all": {
++ "children": [
++ "glance_registry",
++ "glance_api"
++ ],
++ "hosts": []
++ },
++ "glance_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "glance_container": {
++ "hosts": []
++ },
++ "glance_registry": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "gnocchi_all": {
++ "children": [
++ "gnocchi_api",
++ "gnocchi_metricd"
++ ],
++ "hosts": []
++ },
++ "gnocchi_api": {
++ "children": [],
++ "hosts": []
++ },
++ "gnocchi_container": {
++ "hosts": []
++ },
++ "gnocchi_metricd": {
++ "children": [],
++ "hosts": []
++ },
++ "haproxy": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "haproxy_all": {
++ "children": [
++ "haproxy"
++ ],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "haproxy_container": {
++ "hosts": []
++ },
++ "haproxy_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "haproxy_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_all": {
++ "children": [
++ "heat_api",
++ "heat_engine",
++ "heat_api_cloudwatch",
++ "heat_api_cfn"
++ ],
++ "hosts": []
++ },
++ "heat_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_api_cfn": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_api_cloudwatch": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_apis_container": {
++ "hosts": []
++ },
++ "heat_engine": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "heat_engine_container": {
++ "hosts": []
++ },
++ "horizon": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "horizon_all": {
++ "children": [
++ "horizon"
++ ],
++ "hosts": []
++ },
++ "horizon_container": {
++ "hosts": []
++ },
++ "hosts": {
++ "children": [
++ "memcaching_hosts",
++ "metering-compute_hosts",
++ "image_hosts",
++ "shared-infra_hosts",
++ "storage_hosts",
++ "metering-infra_hosts",
++ "os-infra_hosts",
++ "ironic-server_hosts",
++ "key-manager_hosts",
++ "ceph-osd_hosts",
++ "dnsaas_hosts",
++ "network_hosts",
++ "haproxy_hosts",
++ "mq_hosts",
++ "database_hosts",
++ "swift-proxy_hosts",
++ "trove-infra_hosts",
++ "ironic-compute_hosts",
++ "metering-alarm_hosts",
++ "log_hosts",
++ "ceph-mon_hosts",
++ "compute_hosts",
++ "orchestration_hosts",
++ "compute-infra_hosts",
++ "identity_hosts",
++ "unbound_hosts",
++ "swift_hosts",
++ "sahara-infra_hosts",
++ "magnum-infra_hosts",
++ "ironic-infra_hosts",
++ "metrics_hosts",
++ "dashboard_hosts",
++ "storage-infra_hosts",
++ "operator_hosts",
++ "repo-infra_hosts"
++ ],
++ "hosts": []
++ },
++ "identity_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "identity_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "identity_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "image_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "image_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "image_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-compute_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-compute_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-compute_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "ironic-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic-server_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic-server_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_all": {
++ "children": [
++ "ironic_conductor",
++ "ironic_api"
++ ],
++ "hosts": []
++ },
++ "ironic_api": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic_api_container": {
++ "hosts": []
++ },
++ "ironic_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "ironic_compute_container": {
++ "hosts": []
++ },
++ "ironic_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "ironic_conductor_container": {
++ "hosts": []
++ },
++ "ironic_server": {
++ "children": [],
++ "hosts": []
++ },
++ "ironic_server_container": {
++ "hosts": []
++ },
++ "ironic_servers": {
++ "children": [
++ "ironic_server"
++ ],
++ "hosts": []
++ },
++ "key-manager_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "key-manager_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "keystone": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "keystone_all": {
++ "children": [
++ "keystone"
++ ],
++ "hosts": []
++ },
++ "keystone_container": {
++ "hosts": []
++ },
++ "log_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "log_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "lxc_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "magnum": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "magnum_all": {
++ "children": [
++ "magnum"
++ ],
++ "hosts": []
++ },
++ "magnum_container": {
++ "hosts": []
++ },
++ "memcached": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "memcached_all": {
++ "children": [
++ "memcached"
++ ],
++ "hosts": []
++ },
++ "memcached_container": {
++ "hosts": []
++ },
++ "memcaching_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "memcaching_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-alarm_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_container": {
++ "hosts": []
++ },
++ "metering-compute_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-compute_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metering-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "metrics_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "mq_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "network_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "network_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "network_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_agents_container": {
++ "hosts": []
++ },
++ "neutron_all": {
++ "children": [
++ "neutron_agent",
++ "neutron_metadata_agent",
++ "neutron_linuxbridge_agent",
++ "neutron_bgp_dragent",
++ "neutron_dhcp_agent",
++ "neutron_lbaas_agent",
++ "neutron_l3_agent",
++ "neutron_metering_agent",
++ "neutron_server",
++ "neutron_sriov_nic_agent",
++ "neutron_openvswitch_agent"
++ ],
++ "hosts": []
++ },
++ "neutron_bgp_dragent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_dhcp_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_l3_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_lbaas_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_linuxbridge_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "neutron_metadata_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_metering_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_openvswitch_agent": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "neutron_server": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "neutron_server_container": {
++ "hosts": []
++ },
++ "neutron_sriov_nic_agent": {
++ "children": [],
++ "hosts": [
++ "compute-1"
++ ]
++ },
++ "nova_all": {
++ "children": [
++ "nova_console",
++ "nova_scheduler",
++ "ironic_compute",
++ "nova_api_placement",
++ "nova_api_metadata",
++ "nova_api_os_compute",
++ "nova_conductor",
++ "nova_compute"
++ ],
++ "hosts": []
++ },
++ "nova_api_metadata": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_metadata_container": {
++ "hosts": []
++ },
++ "nova_api_os_compute": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_os_compute_container": {
++ "hosts": []
++ },
++ "nova_api_placement": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_api_placement_container": {
++ "hosts": []
++ },
++ "nova_compute": {
++ "children": [],
++ "hosts": [
++ "controller-1",
++ "compute-1"
++ ]
++ },
++ "nova_compute_container": {
++ "hosts": []
++ },
++ "nova_conductor": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_conductor_container": {
++ "hosts": []
++ },
++ "nova_console": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_console_container": {
++ "hosts": []
++ },
++ "nova_scheduler": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "nova_scheduler_container": {
++ "hosts": []
++ },
++ "operator_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "operator_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "orchestration_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "orchestration_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "orchestration_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "os-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "os-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "pkg_repo": {
++ "children": [],
++ "hosts": []
++ },
++ "rabbit_mq_container": {
++ "hosts": []
++ },
++ "rabbitmq": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "rabbitmq_all": {
++ "children": [
++ "rabbitmq"
++ ],
++ "hosts": []
++ },
++ "remote": {
++ "children": [
++ "swift-remote_hosts"
++ ],
++ "hosts": []
++ },
++ "remote_containers": {
++ "children": [
++ "swift-remote_containers"
++ ],
++ "hosts": []
++ },
++ "repo-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "repo-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "repo_all": {
++ "children": [
++ "pkg_repo"
++ ],
++ "hosts": []
++ },
++ "repo_container": {
++ "hosts": []
++ },
++ "rsyslog": {
++ "children": [],
++ "hosts": []
++ },
++ "rsyslog_all": {
++ "children": [
++ "rsyslog"
++ ],
++ "hosts": []
++ },
++ "rsyslog_container": {
++ "hosts": []
++ },
++ "sahara-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_all": {
++ "children": [
++ "sahara_api",
++ "sahara_engine"
++ ],
++ "hosts": []
++ },
++ "sahara_api": {
++ "children": [],
++ "hosts": []
++ },
++ "sahara_container": {
++ "hosts": []
++ },
++ "sahara_engine": {
++ "children": [],
++ "hosts": []
++ },
++ "shared-infra_all": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "shared-infra_containers": {
++ "children": [
++ "controller-3-host_containers",
++ "controller-2-host_containers",
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "shared-infra_hosts": {
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "storage-infra_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage-infra_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage-infra_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "storage_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "storage_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_all": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-proxy_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift-proxy_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift-remote_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "swift-remote_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_acc": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_acc_container": {
++ "hosts": []
++ },
++ "swift_all": {
++ "children": [
++ "swift_cont",
++ "swift_obj",
++ "swift_proxy",
++ "swift_acc"
++ ],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_cont_container": {
++ "hosts": []
++ },
++ "swift_containers": {
++ "children": [
++ "controller-1-host_containers"
++ ],
++ "hosts": []
++ },
++ "swift_hosts": {
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_obj_container": {
++ "hosts": []
++ },
++ "swift_proxy": {
++ "children": [],
++ "hosts": [
++ "controller-1"
++ ]
++ },
++ "swift_proxy_container": {
++ "hosts": []
++ },
++ "swift_remote": {
++ "children": [],
++ "hosts": []
++ },
++ "swift_remote_all": {
++ "children": [
++ "swift_remote"
++ ],
++ "hosts": []
++ },
++ "swift_remote_container": {
++ "hosts": []
++ },
++ "trove-infra_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "trove-infra_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_all": {
++ "children": [
++ "trove_conductor",
++ "trove_taskmanager",
++ "trove_api"
++ ],
++ "hosts": []
++ },
++ "trove_api": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_api_container": {
++ "hosts": []
++ },
++ "trove_conductor": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_conductor_container": {
++ "hosts": []
++ },
++ "trove_taskmanager": {
++ "children": [],
++ "hosts": []
++ },
++ "trove_taskmanager_container": {
++ "hosts": []
++ },
++ "unbound": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_all": {
++ "children": [
++ "unbound"
++ ],
++ "hosts": []
++ },
++ "unbound_container": {
++ "hosts": []
++ },
++ "unbound_containers": {
++ "children": [],
++ "hosts": []
++ },
++ "unbound_hosts": {
++ "children": [],
++ "hosts": []
++ },
++ "utility": {
++ "children": [],
++ "hosts": [
++ "controller-3",
++ "controller-2",
++ "controller-1"
++ ]
++ },
++ "utility_all": {
++ "children": [
++ "utility"
++ ],
++ "hosts": []
++ },
++ "utility_container": {
++ "hosts": []
++ }
++}
+\ No newline at end of file
+diff --git a/tests/roles/bootstrap-host/defaults/main.yml b/tests/roles/bootstrap-host/defaults/main.yml
+deleted file mode 100644
+index 13033f32..00000000
+--- a/tests/roles/bootstrap-host/defaults/main.yml
++++ /dev/null
+@@ -1,231 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-## AIO user-space configuration options
+-# Scenario used to bootstrap the host
+-bootstrap_host_scenario: aio_lxc
+-#
+-# Boolean option to implement OpenStack-Ansible configuration for an AIO
+-# Switch to no for a multi-node configuration
+-bootstrap_host_aio_config: yes
+-#
+-# Path to the location of the bootstrapping configuration files
+-bootstrap_host_aio_config_path: "{{ playbook_dir }}/../etc/openstack_deploy"
+-#
+-# Path to the location of the scripts the bootstrap scripts use
+-bootstrap_host_aio_script_path: "{{ playbook_dir }}/../scripts"
+-#
+-# The user space configuration file names to use
+-bootstrap_host_user_variables_filename: "user_variables.yml"
+-bootstrap_host_user_secrets_filename: "user_secrets.yml"
+-#
+-# Paths to configuration file targets that should be created by the bootstrap
+-bootstrap_host_target_config_paths:
+- - /etc/openstack_deploy
+- - /etc/openstack_deploy/conf.d
+- - /etc/openstack_deploy/env.d
+-
+-# The user variables template to use
+-bootstrap_user_variables_template: user_variables.aio.yml.j2
+-
+-# Extra user variables files can be loaded into /etc/openstack_deploy by
+-# test scenarios. The dict uses scenario as the key to load a list of extra
+-# templates if necessary.
+-bootstrap_user_variables_extra_templates:
+- ceph:
+- - src: user_variables_ceph.yml.j2
+- dest: user_variables_ceph.yml
+- translations:
+- - src: user_variables_translations.yml.j2
+- dest: user_variables_translations.yml
+- octavia:
+- - src: user_variables_octavia.yml.j2
+- dest: user_variables_octavia.yml
+-
+-## Swap memory
+-# If there is no swap memory present, the bootstrap will create a loopback disk
+-# for the purpose of having swap memory available. Swap is required for some of
+-# the services deployed and is useful for AIO's built with less than 16GB memory.
+-# By default the swap size is set to 8GB unless the host memory is less than 8GB,
+-# in which case it is set to 4GB.
+-bootstrap_host_swap_size: "{% if ansible_memory_mb['real']['total'] < 8*1024 %}4{% else %}8{% endif %}"
+-
+-## Loopback volumes
+-# Sparse loopback disks are used for the containers if there is no secondary
+-# disk available to partition for btrfs. They are also used for Ceph, Cinder,
+-# Swift and Nova (instance storage).
+-# The size of the loopback volumes can be customized here (in gigabytes).
+-#
+-# Size of the machines loopback disk in gigabytes (GB).
+-bootstrap_host_loopback_machines_size: 128
+-#
+-# Boolean option to deploy the loopback disk for Cinder
+-bootstrap_host_loopback_cinder: yes
+-# Size of the Cinder loopback disk in gigabytes (GB).
+-bootstrap_host_loopback_cinder_size: 1024
+-#
+-# Boolean option to deploy the loopback disk for Swift
+-bootstrap_host_loopback_swift: yes
+-# Size of the Swift loopback disk in gigabytes (GB).
+-bootstrap_host_loopback_swift_size: 1024
+-#
+-# Boolean option to deploy the loopback disk for Nova
+-bootstrap_host_loopback_nova: yes
+-# Size of the Nova loopback disk in gigabytes (GB).
+-bootstrap_host_loopback_nova_size: 1024
+-
+-# Boolean option to deploy the OSD loopback disks and cluster UUID for Ceph
+-bootstrap_host_ceph: "{{ (bootstrap_host_scenario == 'ceph') | bool }}"
+-# Size of the Ceph OSD loopbacks
+-bootstrap_host_loopback_ceph_size: 1024
+-# Ceph OSDs to create on the AIO host
+-ceph_osd_images:
+- - 'ceph1'
+- - 'ceph2'
+- - 'ceph3'
+-
+-## Network configuration
+-# The AIO bootstrap configures bridges for use with the AIO deployment.
+-# By default, these bridges are configured to be independent of any physical
+-# interfaces, and they have their 'bridge_ports' set to 'none'. However,
+-# deployers can add a physical interface to 'bridge_ports' to connect the
+-# bridge to a real physical interface.
+-#
+-# A setting of 'none' keeps the bridges as independent from physical
+-# interfaces (the default).
+-#
+-# Setting the value to 'eth1' would mean that the bridge is directly connected
+-# to the eth1 device.
+-#
+-# See https://wiki.debian.org/BridgeNetworkConnections for more details.
+-bootstrap_host_bridge_mgmt_ports: none
+-bootstrap_host_bridge_vxlan_ports: none
+-bootstrap_host_bridge_storage_ports: none
+-bootstrap_host_bridge_vlan_ports: "br-vlan-veth"
+-# This enables the VXLAN encapsulation the traditional bridges
+-# (br-mgmt, br-vxlan, br-storage)
+-bootstrap_host_encapsulation_enabled: "{{ not bootstrap_host_aio_config | bool }}"
+-#
+-# Default network IP ranges
+-mgmt_range: "172.29.236"
+-vxlan_range: "172.29.240"
+-storage_range: "172.29.244"
+-vlan_range: "172.29.248"
+-netmask: "255.255.252.0"
+-#
+-# NICs
+-bootstrap_host_public_interface: "{{ ansible_default_ipv4.interface }}"
+-bootstrap_host_encapsulation_interface: eth1
+-#
+-#Encapsulations
+-bootstrap_host_encapsulation_interfaces:
+- encap-mgmt:
+- id: 236
+- underlay_device: "{{ bootstrap_host_encapsulation_interface }}"
+- friendly_name: "Encapsulation of br-mgmt with VXLAN"
+- encap-vxlan:
+- id: 240
+- underlay_device: "{{ bootstrap_host_encapsulation_interface }}"
+- friendly_name: "Encapsulation of br-vxlan with VXLAN"
+- encap-storage:
+- id: 244
+- underlay_device: "{{ bootstrap_host_encapsulation_interface }}"
+- friendly_name: "Encapsulation of br-storage with VXLAN"
+- encap-vlan:
+- id: 248
+- underlay_device: "{{ bootstrap_host_encapsulation_interface }}"
+- friendly_name: "Encapsulation of br-vlan with VXLAN"
+-#
+-# Bridges
+-bridges:
+- - name: "br-mgmt"
+- ip_addr: "172.29.236.100"
+- netmask: "255.255.252.0"
+- - name: "br-vxlan"
+- ip_addr: "172.29.240.100"
+- netmask: "255.255.252.0"
+- - name: "br-storage"
+- ip_addr: "172.29.244.100"
+- netmask: "255.255.252.0"
+- - name: "br-vlan"
+- ip_addr: "172.29.248.100"
+- alias: "172.29.248.1"
+- veth_peer: "eth12"
+- netmask: "255.255.252.0"
+-
+-bootstrap_host_bridges_interfaces:
+- br-mgmt:
+- ports: "{{ bootstrap_host_encapsulation_enabled | bool | ternary ('encap-mgmt', bootstrap_host_bridge_mgmt_ports) }}"
+- ip_address_range: "{{ mgmt_range }}"
+- ip_netmask: "{{ netmask }}"
+- br-storage:
+- ports: "{{ bootstrap_host_encapsulation_enabled | bool | ternary ('encap-storage', bootstrap_host_bridge_storage_ports) }}"
+- ip_address_range: "{{ storage_range }}"
+- ip_netmask: "{{ netmask }}"
+- br-vxlan:
+- ports: "{{ bootstrap_host_encapsulation_enabled | bool | ternary ('encap-vxlan', bootstrap_host_bridge_vxlan_ports) }}"
+- ip_address_range: "{{ vxlan_range }}"
+- ip_netmask: "{{ netmask }}"
+- br-vlan:
+- mode: "{{ bridge_vlan_inet_mode | default('static') }}"
+- ports: "{{ bootstrap_host_encapsulation_enabled | bool | ternary ('encap-vlan', bootstrap_host_bridge_vlan_ports) }}"
+- ip_address_range: "{{ vlan_range }}"
+- ip_netmask: "{{ netmask }}"
+- state_change_scripts: "{{ bridge_vlan_state_change_scripts }}"
+-#
+-# Convenience scripts
+-bridge_vlan_state_change_scripts: |
+- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+- pre-up ip link set br-vlan-veth up
+- pre-up ip link set eth12 up
+- post-down ip link del br-vlan-veth || true
+-bridge_iptables_rules: |
+- # To ensure ssh checksum is correct
+- up /sbin/iptables -A POSTROUTING -t mangle -p tcp -o {{ bootstrap_host_public_interface }} -j CHECKSUM --checksum-fill
+- down /sbin/iptables -D POSTROUTING -t mangle -p tcp -o {{ bootstrap_host_public_interface }} -j CHECKSUM --checksum-fill
+- # To provide internet connectivity to instances
+- up /sbin/iptables -t nat -A POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE
+- down /sbin/iptables -t nat -D POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE
+-
+-# Set the container technology in service. Options are lxc.
+-container_tech: "lxc"
+-
+-## Extra storage
+-# An AIO may optionally be built using a second storage device. If a
+-# secondary disk device to use is not specified, then the AIO will be
+-# built on any existing disk partitions.
+-#
+-# WARNING: The data on a secondary storage device specified here will
+-# be destroyed and repartitioned.
+-#
+-# Specify the secondary disk device to use.
+-bootstrap_host_data_disk_device: null
+-#
+-# Boolean value to force the repartitioning of the secondary device.
+-bootstrap_host_data_disk_device_force: no
+-#
+-# If the storage capacity on this device is greater than or equal to this
+-# size (in GB), the bootstrap process will use it.
+-bootstrap_host_data_disk_min_size: 50
+-
+-# Boolean option to build Amphora image and certs
+-bootstrap_host_octavia: "{{ (bootstrap_host_scenario in ['octavia', 'translations']) | bool }}"
+-
+-### Optional Settings ###
+-
+-# Specify the public IP address for the host.
+-# By default the address will be set to the ipv4 address of the
+-# host's network interface that has the default route on it.
+-#bootstrap_host_public_address: 0.0.0.0
+diff --git a/tests/roles/bootstrap-host/tasks/check-requirements.yml b/tests/roles/bootstrap-host/tasks/check-requirements.yml
+deleted file mode 100644
+index 58782b4f..00000000
+--- a/tests/roles/bootstrap-host/tasks/check-requirements.yml
++++ /dev/null
+@@ -1,97 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Check for a supported Operating System
+- assert:
+- that:
+- - (ansible_distribution == 'Ubuntu' and ansible_distribution_release == 'xenial') or
+- (ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7') or
+- (ansible_os_family == 'Suse' and ansible_distribution_major_version == '42')
+- msg: "The only supported platforms for this release are Ubuntu 16.04 LTS (Xenial), CentOS 7 (WIP) and openSUSE Leap 42.X (WIP)"
+- tags:
+- - check-operating-system
+-
+-- name: Identify the space available in /
+- # NOTE(hwoarang): df does not work reliably on btrfs filesystems
+- # https://btrfs.wiki.kernel.org/index.php/FAQ#How_much_free_space_do_I_have.3F
+- # As such, use the btrfs tools to determine the real available size on the
+- # disk
+- shell: |
+- if [[ $(df -T / | tail -n 1 | awk '{print $2}') == "btrfs" ]]; then
+- btrfs fi usage --kbytes / | awk '/^.*Free / {print $3}'| sed 's/\..*//'
+- else
+- df -BK / | awk '!/^Filesystem/ {print $4}' | sed 's/K//'
+- fi
+- when: bootstrap_host_data_disk_device == None
+- changed_when: false
+- register: root_space_available
+- tags:
+- - check-disk-size
+-
+-# Convert root_space_available to bytes.
+-- name: Set root disk facts
+- set_fact:
+- host_root_space_available_bytes: "{{ ( root_space_available.stdout | int) * 1024 | int }}"
+- when:
+- - bootstrap_host_data_disk_device == None
+- tags:
+- - check-disk-size
+-
+-- name: Set data disk facts
+- set_fact:
+- host_data_disk_sectors: "{{ (ansible_devices[bootstrap_host_data_disk_device]['sectors'] | int) }}"
+- host_data_disk_sectorsize: "{{ (ansible_devices[bootstrap_host_data_disk_device]['sectorsize'] | int) }}"
+- when:
+- - bootstrap_host_data_disk_device != None
+- tags:
+- - check-disk-size
+-
+-# Calculate the size of the bootstrap_host_data_disk_device by muliplying sectors with sectorsize.
+-- name: Calculate data disk size
+- set_fact:
+- host_data_disk_size_bytes: "{{ ((host_data_disk_sectors | int) * (host_data_disk_sectorsize | int)) | int }}"
+- when: bootstrap_host_data_disk_device != None
+- tags:
+- - check-disk-size
+-
+-# Convert bootstrap_host_data_disk_min_size to bytes.
+-- name: Set min size fact
+- set_fact:
+- host_data_disk_min_size_bytes: "{{ ((bootstrap_host_data_disk_min_size | int) * 1024**3) | int }}"
+- tags:
+- - check-disk-size
+-
+-- name: Fail if there is not enough space available in /
+- assert:
+- that: |
+- (host_root_space_available_bytes | int) >= (host_data_disk_min_size_bytes | int)
+- when: bootstrap_host_data_disk_device == None
+- tags:
+- - check-disk-size
+-
+-- name: Fail if there is not enough disk space available (disk specified)
+- assert:
+- that: |
+- (host_data_disk_size_bytes | int) >= (host_data_disk_min_size_bytes | int)
+- when: bootstrap_host_data_disk_device != None
+- tags:
+- - check-disk-size
+-
+-- name: Ensure that the kernel has VXLAN support
+- modprobe:
+- name: vxlan
+- state: present
+- tags:
+- - check-vxlan
+diff --git a/tests/roles/bootstrap-host/tasks/install_packages.yml b/tests/roles/bootstrap-host/tasks/install_packages.yml
+deleted file mode 100644
+index e7ff5812..00000000
+--- a/tests/roles/bootstrap-host/tasks/install_packages.yml
++++ /dev/null
+@@ -1,53 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Install RDO package
+- package:
+- name: "{{ rdo_package }}"
+- state: "present"
+- register: install_cloud_rdo_package
+- until: install_cloud_rdo_package | success
+- retries: 5
+- delay: 2
+- when:
+- - ansible_pkg_mgr in ['yum', 'dnf']
+-
+-- name: Add zypper cloud repositories
+- zypper_repository:
+- auto_import_keys: yes
+- autorefresh: yes
+- name: "{{ item.name }}"
+- repo: "{{ item.uri }}"
+- runrefresh: yes
+- with_items: "{{ opensuse_openstack_repos }}"
+- retries: 5
+- delay: 2
+- when:
+- - ansible_pkg_mgr == 'zypper'
+-
+-- name: Remove known problem packages
+- package:
+- name: "{{ packages_remove }}"
+- state: absent
+- tags:
+- - remove-packages
+-
+-- name: Install packages
+- package:
+- name: "{{ packages_install }}"
+- state: present
+- update_cache: "{{ (ansible_pkg_mgr in ['apt', 'zypper']) | ternary('yes', omit) }}"
+- tags:
+- - install-packages
+diff --git a/tests/roles/bootstrap-host/tasks/main.yml b/tests/roles/bootstrap-host/tasks/main.yml
+deleted file mode 100644
+index e2533014..00000000
+--- a/tests/roles/bootstrap-host/tasks/main.yml
++++ /dev/null
+@@ -1,126 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-# Before we do anything, check the minimum requirements
+-- include: check-requirements.yml
+- tags:
+- - check-requirements
+-
+-# We will look for the most specific variable files first and eventually
+-# end up with the least-specific files.
+-- name: Gather variables for each operating system
+- include_vars: "{{ item }}"
+- with_first_found:
+- - "{{ ansible_distribution | lower }}-{{ ansible_distribution_version | lower }}.yml"
+- - "{{ ansible_distribution | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+- - "{{ ansible_os_family | lower }}-{{ ansible_distribution_major_version | lower }}.yml"
+- - "{{ ansible_distribution | lower }}.yml"
+- - "{{ ansible_os_family | lower }}.yml"
+- tags:
+- - always
+-
+-- name: Create the required directories
+- file:
+- path: "{{ item }}"
+- state: directory
+- with_items:
+- - "/openstack"
+- tags:
+- - create-directories
+-
+-- include: install_packages.yml
+- tags:
+- - install-packages
+-
+-# Prepare the data disk, if one is provided
+-- include: prepare_data_disk.yml
+- when: bootstrap_host_data_disk_device != None
+- tags:
+- - prepare-data-disk
+-
+-# Prepare the Machines storage loopback disk
+-# This is only necessary when there is no secondary disk
+-# available to partition for btrfs
+-- include: prepare_loopback_machines.yml
+- when:
+- - bootstrap_host_data_disk_device == None
+- tags:
+- - prepare-loopback-machines
+-
+-# Prepare the swap space loopback disk
+-# This is only necessary if there isn't swap already
+-- include: prepare_loopback_swap.yml
+- static: no
+- when:
+- - ansible_swaptotal_mb < 1
+- tags:
+- - prepare-loopback-swap
+-
+-# Prepare the Cinder LVM VG loopback disk
+-# This is only necessary if bootstrap_host_loopback_cinder is set to yes
+-- include: prepare_loopback_cinder.yml
+- when:
+- - bootstrap_host_loopback_cinder | bool
+- tags:
+- - prepare-loopback-cinder
+-
+-# Prepare the Nova instance storage loopback disk
+-- include: prepare_loopback_nova.yml
+- when:
+- - bootstrap_host_loopback_nova | bool
+- tags:
+- - prepare-loopback-nova
+-
+-# Prepare the Swift data storage loopback disks
+-- include: prepare_loopback_swift.yml
+- when:
+- - bootstrap_host_loopback_swift | bool
+- tags:
+- - prepare-loopback-swift
+-
+-# Prepare the Ceph cluster UUID and loopback disks
+-- include: prepare_ceph.yml
+- when:
+- - bootstrap_host_ceph | bool
+- tags:
+- - prepare-ceph
+-
+-# Prepare the Octavia certs and image
+-- include: prepare_octavia.yml
+- when:
+- - bootstrap_host_octavia | bool
+- tags:
+- - prepare-octavia
+-
+-# Ensure hostname/ip is consistent with inventory
+-- include: prepare_hostname.yml
+- tags:
+- - prepare-hostname
+-
+-# Prepare the network interfaces
+-- include: prepare_networking.yml
+- tags:
+- - prepare-networking
+-
+-# Ensure that there are both private and public ssh keys for root
+-- include: prepare_ssh_keys.yml
+- tags:
+- - prepare-ssh-keys
+-
+-# Put the OpenStack-Ansible configuration for an All-In-One on the host
+-- include: prepare_aio_config.yml
+- when: bootstrap_host_aio_config | bool
+- tags:
+- - prepare-aio-config
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_aio_config.yml b/tests/roles/bootstrap-host/tasks/prepare_aio_config.yml
+deleted file mode 100644
+index f9b0804d..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_aio_config.yml
++++ /dev/null
+@@ -1,243 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-
+-- name: Create the required deployment directories
+- file:
+- path: "{{ item }}"
+- state: directory
+- with_items: "{{ bootstrap_host_target_config_paths }}"
+- tags:
+- - create-directories
+-
+-- name: Deploy user conf.d configuration
+- config_template:
+- src: "{{ item.path | default(bootstrap_host_aio_config_path ~ '/conf.d') }}/{{ item.name }}"
+- dest: "/etc/openstack_deploy/conf.d/{{ item.name | regex_replace('.aio$', '') }}"
+- config_overrides: "{{ item.override | default({}) }}"
+- config_type: "yaml"
+- with_items: "{{ openstack_confd_entries | default([]) }}"
+- tags:
+- - deploy-confd
+-
+-- name: Deploy openstack_user_config
+- config_template:
+- src: "{{ bootstrap_host_aio_config_path }}/openstack_user_config.yml.aio.j2"
+- dest: "/etc/openstack_deploy/openstack_user_config.yml"
+- config_overrides: "{{ openstack_user_config_overrides | default({}) }}"
+- config_type: "yaml"
+- list_extend: false
+- tags:
+- - deploy-openstack-user-config
+-
+-- name: Deploy user_secrets file
+- config_template:
+- src: "{{ bootstrap_host_aio_config_path }}/user_secrets.yml"
+- dest: "/etc/openstack_deploy/{{ bootstrap_host_user_secrets_filename }}"
+- config_overrides: "{{ user_secrets_overrides | default({}) }}"
+- config_type: "yaml"
+- tags:
+- - deploy-user-secrets
+-
+-- name: Generate any missing values in user_secrets
+- command: "/opt/ansible-runtime/bin/python {{ bootstrap_host_aio_script_path }}/pw-token-gen.py --file /etc/openstack_deploy/{{ bootstrap_host_user_secrets_filename }}"
+- changed_when: false
+- tags:
+- - generate_secrets
+-
+-- name: Detect whether the host is an OpenStack-CI host
+- stat:
+- path: /etc/nodepool
+- register: nodepool_dir
+-
+-# OVH nodepool nodes have an issue which causes nested virt
+-# instances to crash with a hardware error, then a dump.
+-# We therefore detect whether we're running on OVH and
+-# force it to use qemu instead.
+-- name: Discover the OpenStack-Infra mirrors
+- shell: |
+- source /etc/ci/mirror_info.sh
+- NODEPOOL_OVERRIDES="/etc/openstack_deploy/user_openstackci.yml"
+- echo "uca_apt_repo_url: '${NODEPOOL_UCA_MIRROR}'" >> ${NODEPOOL_OVERRIDES}
+- echo "openstack_hosts_centos_mirror_url: '${NODEPOOL_CENTOS_MIRROR}'" >> ${NODEPOOL_OVERRIDES}
+- if [[ ${NODEPOOL_PYPI_MIRROR} == *.ovh.* ]]; then
+- echo "nova_virt_type: 'qemu'" >> ${NODEPOOL_OVERRIDES}
+- fi
+- args:
+- executable: /bin/bash
+- when:
+- - nodepool_dir.stat.exists | bool
+- tags:
+- - skip_ansible_lint
+-
+-- name: Discover the OpenStack-Infra pypi/wheel mirror
+- shell: |
+- source /etc/ci/mirror_info.sh
+- echo "${NODEPOOL_PYPI_MIRROR}"
+- echo "${NODEPOOL_WHEEL_MIRROR}"
+- args:
+- executable: /bin/bash
+- register: _pypi_wheel_mirror
+- when:
+- - nodepool_dir.stat.exists | bool
+- tags:
+- - skip_ansible_lint
+-
+-- name: Discover the OpenStack-Infra LXC reverse proxy
+- shell: |
+- source /etc/ci/mirror_info.sh
+- echo ${NODEPOOL_LXC_IMAGE_PROXY}
+- register: _lxc_mirror
+- args:
+- executable: /bin/bash
+- when:
+- - nodepool_dir.stat.exists | bool
+- tags:
+- - skip_ansible_lint
+-
+-- name: Set the package cache timeout to 60 mins in OpenStack-CI
+- set_fact:
+- cache_timeout: 3600
+- when:
+- - cache_timeout is not defined
+- - nodepool_dir.stat.exists
+-
+-- name: Determine if the host has a global pip config file
+- stat:
+- path: /etc/pip.conf
+- register: pip_conf_file
+-
+-# NOTE(mhayden): The OpenStack CI images for CentOS 7 recently set SELinux to
+-# Enforcing mode by default. While I am normally a supporter of this change,
+-# the SELinux policy work for CentOS 7 is not done yet.
+-- name: Set SELinux to permissive mode in OpenStack-CI
+- selinux:
+- policy: targeted
+- state: permissive
+- when:
+- - ansible_selinux.status is defined
+- - ansible_selinux.status == "enabled"
+-
+-# This is a very dirty hack due to images.linuxcontainers.org
+-# constantly failing to resolve in openstack-infra.
+-- name: Implement hard-coded hosts entries for consistently failing name
+- lineinfile:
+- path: "/etc/hosts"
+- line: "{{ item }}"
+- state: present
+- with_items:
+- - "91.189.91.21 images.linuxcontainers.org us.images.linuxcontainers.org"
+- - "91.189.88.37 images.linuxcontainers.org uk.images.linuxcontainers.org"
+- when:
+- - nodepool_dir.stat.exists
+-
+-- name: Determine the fastest available OpenStack-Infra wheel mirror
+- command: "{{ bootstrap_host_aio_script_path }}/fastest-infra-wheel-mirror.py"
+- register: fastest_wheel_mirror
+- when: not pip_conf_file.stat.exists
+-
+-- name: Set repo_build_pip_extra_indexes fact
+- set_fact:
+- repo_build_pip_extra_indexes: "{{ fastest_wheel_mirror.stdout_lines }}"
+- when: not pip_conf_file.stat.exists
+-
+-- name: Set the user_variables
+- config_template:
+- src: "{{ bootstrap_user_variables_template }}"
+- dest: "/etc/openstack_deploy/{{ bootstrap_host_user_variables_filename }}"
+- config_overrides: "{{ user_variables_overrides | default({}) }}"
+- config_type: yaml
+-
+-- name: Drop the extra user_variables files for this scenario
+- config_template:
+- src: "{{ item.src }}"
+- dest: "/etc/openstack_deploy/{{ item.dest }}"
+- config_overrides: "{{ item.config_overrides | default({}) }}"
+- config_type: yaml
+- with_items: "{{ bootstrap_user_variables_extra_templates[bootstrap_host_scenario] | default([]) }}"
+-
+-- name: Copy modified cinder-volume env.d file for ceph scenario
+- copy:
+- src: "{{ playbook_dir }}/../etc/openstack_deploy/env.d/cinder-volume.yml.container.example"
+- dest: "/etc/openstack_deploy/env.d/cinder-volume.yml"
+- when:
+- - "bootstrap_host_scenario == 'ceph'"
+-
+-- name: Copy modified env.d file for metal scenario
+- copy:
+- src: "{{ playbook_dir }}/../etc/openstack_deploy/env.d/aio_metal.yml.example"
+- dest: "/etc/openstack_deploy/env.d/aio_metal.yml"
+- when:
+- - "bootstrap_host_scenario == 'aio_metal'"
+-
+-- name: Add user_conf_files to contain the list of files to copy into containers
+- file:
+- path: /etc/openstack_deploy/user_conf_files.yml
+- state: touch
+- when: pip_conf_file.stat.exists
+- tags:
+- - container-conf-files
+-
+-- name: Ensure that the first line in user_conf_files is correct
+- lineinfile:
+- dest: /etc/openstack_deploy/user_conf_files.yml
+- line: "---"
+- insertbefore: BOF
+- when: pip_conf_file.stat.exists
+- tags:
+- - container-conf-files
+-
+-- name: Ensure that the second line in user_conf_files is correct
+- lineinfile:
+- dest: /etc/openstack_deploy/user_conf_files.yml
+- line: "lxc_container_cache_files:"
+- insertafter: "^---"
+- when: pip_conf_file.stat.exists
+- tags:
+- - container-conf-files
+-
+-- name: Add the dict to copy the global pip config file into user_conf_files
+- lineinfile:
+- dest: /etc/openstack_deploy/user_conf_files.yml
+- line: " - { src: '/etc/pip.conf', dest: '/etc/pip.conf' }"
+- when: pip_conf_file.stat.exists
+- tags:
+- - container-conf-files
+-
+-- name: Create vars override folders if we need to test them
+- file:
+- path: "{{ item }}"
+- state: directory
+- with_items:
+- - /etc/openstack_deploy/group_vars
+- - /etc/openstack_deploy/host_vars
+- when: "(lookup('env','ACTION') | default(false,true)) == 'varstest'"
+-
+-- name: Create user-space overrides
+- lineinfile:
+- path: "{{ item.path }}"
+- state: present
+- line: "{{ item.line }}"
+- create: yes
+- with_items:
+- - path: /etc/openstack_deploy/group_vars/hosts.yml
+- line: 'babar: "elephant"'
+- - path: /etc/openstack_deploy/group_vars/hosts.yml
+- line: 'lxc_hosts_package_state: "present"'
+- - path: /etc/openstack_deploy/host_vars/localhost.yml
+- line: 'security_package_state: "present"'
+- - path: /etc/openstack_deploy/host_vars/localhost.yml
+- line: 'tintin: "milou"'
+- when: "(lookup('env','ACTION') | default(false,true)) == 'varstest'"
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_ceph.yml b/tests/roles/bootstrap-host/tasks/prepare_ceph.yml
+deleted file mode 100644
+index efaba66b..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_ceph.yml
++++ /dev/null
+@@ -1,72 +0,0 @@
+----
+-# Copyright 2016, Logan Vig <logan2211@gmail.com>
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Create sparse ceph OSD files
+- command: truncate -s {{ bootstrap_host_loopback_ceph_size }}G /openstack/{{ item }}.img
+- args:
+- creates: "/openstack/{{ item }}.img"
+- with_items: "{{ ceph_osd_images }}"
+- register: ceph_create
+- changed_when: false
+- tags:
+- - ceph-file-create
+-
+-- name: Create the ceph loopback device
+- command: losetup -f /openstack/{{ item.item }}.img --show
+- with_items: "{{ ceph_create.results }}"
+- register: ceph_create_loopback
+- when: not item|skipped
+- changed_when: false
+- tags:
+- - skip_ansible_lint
+-
+-- name: Ensure that rc.local exists
+- file:
+- path: "{{ rc_local }}"
+- state: touch
+- mode: "u+x"
+- tags:
+- - ceph-rc-file
+-
+-- name: Create ceph loopback at boot time
+- lineinfile:
+- dest: "{{ rc_local }}"
+- line: "losetup -f /openstack/{{ item }}.img"
+- insertbefore: "{{ rc_local_insert_before }}"
+- with_items: "{{ ceph_osd_images }}"
+-
+-# TODO(logan): Move these vars to user_variables.ceph.yml.j2 once LP #1649381
+-# is fixed and eliminate this task.
+-- name: Write ceph cluster config
+- copy:
+- content: |
+- ---
+- devices: {{ ceph_create_loopback.results | map(attribute='stdout') | list | to_yaml | trim }}
+- cinder_backends:
+- "RBD":
+- volume_driver: cinder.volume.drivers.rbd.RBDDriver
+- rbd_pool: volumes
+- rbd_ceph_conf: /etc/ceph/ceph.conf
+- rbd_store_chunk_size: 8
+- volume_backend_name: rbddriver
+- rbd_user: cinder
+- rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
+- report_discard_supported: true
+- dest: /etc/openstack_deploy/user_ceph_aio.yml
+- force: no
+- become: false
+- when: not ceph_create_loopback|skipped
+- tags:
+- - skip_ansible_lint
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_data_disk.yml b/tests/roles/bootstrap-host/tasks/prepare_data_disk.yml
+deleted file mode 100644
+index 0d4bc9a5..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_data_disk.yml
++++ /dev/null
+@@ -1,79 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-
+-# Only execute the disk partitioning process if a partition labeled
+-# 'openstack-data{1,2}' is not present and that partition is not
+-# formatted as ext4. This is an attempt to achieve idempotency just
+-# in case these tasks are executed multiple times.
+-- name: Determine whether partitions labeled openstack-data{1,2} are present
+- shell: |
+- parted --script -l -m | egrep -q ':ext4:openstack-data[12]:;$'
+- register: data_disk_partitions
+- changed_when: false
+- failed_when: false
+- tags:
+- - check-data-disk-partitions
+-
+-- name: Dismount and remove fstab entries for anything on the data disk device
+- mount:
+- name: "{{ item.mount }}"
+- src: "{{ item.device }}"
+- fstype: ext4
+- state: absent
+- when:
+- - data_disk_partitions.rc == 1 or bootstrap_host_data_disk_device_force | bool
+- - item.device | search(bootstrap_host_data_disk_device)
+- with_items:
+- - "{{ ansible_mounts }}"
+-
+-- name: Partition the whole data disk for our usage
+- command: "{{ item }}"
+- when: data_disk_partitions.rc == 1 or bootstrap_host_data_disk_device_force | bool
+- with_items:
+- - "parted --script /dev/{{ bootstrap_host_data_disk_device | regex_replace('!','/') }} mklabel gpt"
+- - "parted --align optimal --script /dev/{{ bootstrap_host_data_disk_device | regex_replace('!','/') }} mkpart openstack-data1 ext4 0% 40%"
+- - "parted --align optimal --script /dev/{{ bootstrap_host_data_disk_device | regex_replace('!','/') }} mkpart openstack-data2 btrfs 40% 100%"
+- tags:
+- - create-data-disk-partitions
+-
+-- name: Format the partitions
+- filesystem:
+- fstype: "{{ item.fstype }}"
+- dev: "{{ item.dev }}"
+- when: data_disk_partitions.rc == 1 or bootstrap_host_data_disk_device_force | bool
+- with_items:
+- - dev: "/dev/{{ bootstrap_host_data_disk_device | regex_replace('!(.*)$','/\\1p') }}1"
+- fstype: "ext4"
+- - dev: "/dev/{{ bootstrap_host_data_disk_device | regex_replace('!(.*)$','/\\1p') }}2"
+- fstype: "btrfs"
+- tags:
+- - format-data-partitions
+-
+-- name: Create the mount points, fstab entries and mount the file systems
+- mount:
+- name: "{{ item.mount_point }}"
+- src: "{{ item.device }}"
+- fstype: "{{ item.fstype }}"
+- state: mounted
+- with_items:
+- - mount_point: /openstack
+- device: "/dev/{{ bootstrap_host_data_disk_device | regex_replace('!(.*)$','/\\1p') }}1"
+- fstype: ext4
+- - mount_point: /var/lib/machines
+- device: "/dev/{{ bootstrap_host_data_disk_device | regex_replace('!(.*)$','/\\1p') }}2"
+- fstype: btrfs
+- tags:
+- - mount-data-partitions
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_loopback_cinder.yml b/tests/roles/bootstrap-host/tasks/prepare_loopback_cinder.yml
+deleted file mode 100644
+index 38fd224c..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_loopback_cinder.yml
++++ /dev/null
+@@ -1,72 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Create sparse Cinder file
+- command: "truncate -s {{ bootstrap_host_loopback_cinder_size }}G /openstack/cinder.img"
+- args:
+- creates: /openstack/cinder.img
+- register: cinder_create
+- tags:
+- - cinder-file-create
+-
+-- name: Get a loopback device for cinder file
+- command: losetup -f
+- when: cinder_create | changed
+- register: cinder_losetup
+- tags:
+- - cinder-device-get
+-
+-- name: Create the loopback device
+- command: "losetup {{ cinder_losetup.stdout }} /openstack/cinder.img"
+- when: cinder_create | changed
+- tags:
+- - cinder-device-create
+-
+-- name: Ensure that rc.local exists
+- file:
+- path: "{{ rc_local }}"
+- state: touch
+- mode: "u+x"
+- tags:
+- - cinder-rc-file
+-
+-# As the cinder loopback is an LVM VG, it needs to be mounted differently
+-# to the other loopback files. It requires the use of rc.local to attach
+-# the loopback device on boot so that the VG becomes available immediately
+-# after the boot process completes.
+-- name: Create loopback devices at boot time
+- lineinfile:
+- dest: "{{ rc_local }}"
+- line: "losetup $(losetup -f) /openstack/cinder.img"
+- insertbefore: "{{ rc_local_insert_before }}"
+- tags:
+- - cinder-rc-config
+-
+-- name: Make LVM physical volume on the cinder device
+- command: "{{ item }}"
+- when: cinder_create | changed
+- with_items:
+- - "pvcreate {{ cinder_losetup.stdout }}"
+- - "pvscan"
+- tags:
+- - cinder-lvm-pv
+-
+-- name: Add cinder-volumes volume group
+- lvg:
+- vg: cinder-volumes
+- pvs: "{{ cinder_losetup.stdout }}"
+- when: cinder_create | changed
+- tags:
+- - cinder-lvm-vg
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_loopback_nova.yml b/tests/roles/bootstrap-host/tasks/prepare_loopback_nova.yml
+deleted file mode 100644
+index 37a6f899..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_loopback_nova.yml
++++ /dev/null
+@@ -1,39 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Create sparse Nova file
+- command: "truncate -s {{ bootstrap_host_loopback_nova_size }}G /openstack/nova.img"
+- args:
+- creates: /openstack/nova.img
+- register: nova_create
+- tags:
+- - nova-file-create
+-
+-- name: Format the Nova file
+- filesystem:
+- fstype: ext4
+- dev: /openstack/nova.img
+- when: nova_create | changed
+- tags:
+- - nova-format-file
+-
+-- name: Create the mount points, fstab entries and mount the file systems
+- mount:
+- name: /var/lib/nova/instances
+- src: /openstack/nova.img
+- fstype: ext4
+- state: mounted
+- tags:
+- - nova-file-mount
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_loopback_swap.yml b/tests/roles/bootstrap-host/tasks/prepare_loopback_swap.yml
+deleted file mode 100644
+index 8143b6eb..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_loopback_swap.yml
++++ /dev/null
+@@ -1,73 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Create swap file
+- command: "{{ swap_create_command }}"
+- args:
+- creates: /openstack/swap.img
+- register: swap_create
+- tags:
+- - swap-file-create
+-
+-- name: Set swap file permissions to 0600
+- file:
+- path: /openstack/swap.img
+- mode: 0600
+- tags:
+- - swap-permissions
+-
+-- name: Format the swap file
+- command: mkswap /openstack/swap.img
+- when: swap_create | changed
+- tags:
+- - swap-format
+-
+-- name: Ensure that the swap file entry is in /etc/fstab
+- mount:
+- name: none
+- src: /openstack/swap.img
+- fstype: swap
+- opts: sw
+- passno: 0
+- dump: 0
+- state: present
+- tags:
+- - swap-fstab
+-
+-- name: Bring swap file online
+- shell: |
+- return_code=0
+- if ! grep /openstack/swap.img /proc/swaps; then
+- swapon /openstack/swap.img
+- return_code=2
+- fi
+- exit ${return_code}
+- register: _set_swap_online
+- changed_when: _set_swap_online.rc == 2
+- failed_when: _set_swap_online.rc not in [0, 2]
+- # We skip ansible lint testing for this task as it fails with
+- # ANSIBLE0014 Environment variables don't work as part of command
+- # which is nonsense.
+- tags:
+- - skip_ansible_lint
+- - swap-online
+-
+-- name: Set system swappiness
+- sysctl:
+- name: vm.swappiness
+- value: 10
+- state: present
+- tags:
+- - swap-sysctl
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_loopback_swift.yml b/tests/roles/bootstrap-host/tasks/prepare_loopback_swift.yml
+deleted file mode 100644
+index 4901942d..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_loopback_swift.yml
++++ /dev/null
+@@ -1,55 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Create sparse Swift files
+- command: "truncate -s {{ bootstrap_host_loopback_swift_size }}G /openstack/{{ item }}.img"
+- args:
+- creates: "/openstack/{{ item }}.img"
+- with_items:
+- - 'swift1'
+- - 'swift2'
+- - 'swift3'
+- register: swift_create
+- tags:
+- - swift-file-create
+-
+-- name: Format the Swift files
+- filesystem:
+- fstype: xfs
+- opts: '-K'
+- dev: "/openstack/{{ item }}.img"
+- when: swift_create | changed
+- with_items:
+- - 'swift1'
+- - 'swift2'
+- - 'swift3'
+- tags:
+- - swift-format-file
+-
+-- name: Create the Swift mount points, fstab entries and mount the file systems
+- mount:
+- name: "/srv/{{ item }}.img"
+- src: "/openstack/{{ item }}.img"
+- fstype: xfs
+- opts: 'loop,noatime,nodiratime,nobarrier,logbufs=8'
+- passno: 0
+- dump: 0
+- state: mounted
+- with_items:
+- - 'swift1'
+- - 'swift2'
+- - 'swift3'
+- tags:
+- - swift-file-mount
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_networking.yml b/tests/roles/bootstrap-host/tasks/prepare_networking.yml
+deleted file mode 100644
+index c7ea663e..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_networking.yml
++++ /dev/null
+@@ -1,193 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Ensure that /etc/network/interfaces.d/ exists (Debian)
+- file:
+- path: /etc/network/interfaces.d/
+- state: directory
+- tags:
+- - networking-dir-create
+- when:
+- - ansible_pkg_mgr == 'apt'
+-
+-- name: Copy AIO network configuration (Debian)
+- template:
+- src: osa_interfaces.cfg.j2
+- dest: /etc/network/interfaces.d/osa_interfaces.cfg
+- register: osa_interfaces
+- when:
+- - bootstrap_host_aio_config | bool
+- - ansible_pkg_mgr == 'apt'
+- tags:
+- - networking-interfaces-file
+-
+-- name: Copy network configuration (RedHat)
+- template:
+- src: "redhat_interface_{{ item.type | default('default') }}.cfg.j2"
+- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name | default('br-mgmt') }}"
+- with_items: "{{ bridges }}"
+- register: network_interfaces_rhel
+- when:
+- - ansible_pkg_mgr in ['yum', 'dnf']
+-
+-- name: Create alias file when required (RedHat)
+- template:
+- src: "redhat_interface_alias.cfg.j2"
+- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ item.name | default('br-mgmt')}}:0"
+- with_items: "{{ bridges }}"
+- when:
+- - ansible_pkg_mgr in ['yum', 'dnf']
+- - item.alias is defined
+-
+-- name: Put down post-up script for veth-peer interfaces (RedHat)
+- template:
+- src: "rpm_interface_{{ item[0] }}.cfg.j2"
+- dest: "/etc/sysconfig/network-scripts/{{ item[0] }}-veth-{{ item[1].name | default('br-mgmt') }}-2-{{ item[1].veth_peer | default('eth1') }}"
+- mode: "0755"
+- with_nested:
+- - [ "ifup-post", "ifdown-post" ]
+- - "{{ bridges }}"
+- when:
+- - item[1].veth_peer is defined
+- - ansible_pkg_mgr in ['yum', 'dnf']
+-
+-- name: Ensure the postup/postdown scripts are loaded (RedHat)
+- lineinfile:
+- dest: "/etc/sysconfig/network-scripts/{{ item[0] }}"
+- line: ". /etc/sysconfig/network-scripts/{{ item[0] }}-veth-{{ item[1].name | default('br-mgmt') }}-2-{{ item[1].veth_peer | default('eth1') }}"
+- insertbefore: "^exit 0"
+- with_nested:
+- - [ "ifup-post", "ifdown-post" ]
+- - "{{ bridges }}"
+- when:
+- - item[1].veth_peer is defined
+- - ansible_pkg_mgr in ['yum', 'dnf']
+-
+-- name: Copy network configuration (SUSE)
+- template:
+- src: "suse_interface_default.cfg.j2"
+- dest: "/etc/sysconfig/network/ifcfg-{{ item.name | default('br-mgmt') }}"
+- with_items: "{{ bridges }}"
+- register: network_interfaces_suse
+- when:
+- - ansible_pkg_mgr == 'zypper'
+-
+-- name: Put down post-up script for veth-peer interfaces (SUSE)
+- template:
+- src: "rpm_interface_{{ item[0] }}.cfg.j2"
+- dest: "/etc/sysconfig/network/scripts/{{ item[0] }}-veth-{{ item[1].name | default('br-mgmt') }}-2-{{ item[1].veth_peer | default('eth1') }}"
+- mode: "0755"
+- with_nested:
+- - [ "ifup-post", "ifdown-post" ]
+- - "{{ bridges }}"
+- when:
+- - item[1].veth_peer is defined
+- - ansible_pkg_mgr == 'zypper'
+-
+-- name: Ensure the postup scripts are loaded (SUSE)
+- lineinfile:
+- dest: "/etc/sysconfig/network/ifcfg-{{ item[1].name | default('br-mgmt') }}"
+- line: "POST_UP_SCRIPT=\"compat:suse:{{ item[0] }}-veth-{{ item[1].name | default('br-mgmt') }}-2-{{ item[1].veth_peer | default('eth1') }}\""
+- with_nested:
+- - [ "ifup-post" ]
+- - "{{ bridges }}"
+- when:
+- - item[1].veth_peer is defined
+- - ansible_pkg_mgr == 'zypper'
+-
+-- name: Ensure the postdown scripts are loaded (SUSE)
+- lineinfile:
+- dest: "/etc/sysconfig/network/ifcfg-{{ item[1].name | default('br-mgmt') }}"
+- line: "POST_DOWN_SCRIPT=\"compat:suse:{{ item[0] }}-veth-{{ item[1].name | default('br-mgmt') }}-2-{{ item[1].veth_peer | default('eth1') }}\""
+- with_nested:
+- - [ "ifdown-post" ]
+- - "{{ bridges }}"
+- when:
+- - item[1].veth_peer is defined
+- - ansible_pkg_mgr == 'zypper'
+-
+-- name: Copy multinode network configuration (Debian)
+- template:
+- src: osa_interfaces_multinode.cfg.j2
+- dest: /etc/network/interfaces.d/osa_interfaces.cfg
+- register: osa_multinode_interfaces
+- when:
+- - not bootstrap_host_aio_config | bool
+- - ansible_pkg_mgr == 'apt'
+- tags:
+- - networking-interfaces-file
+-
+-- name: Ensure our interfaces.d configuration files are loaded automatically (Debian)
+- lineinfile:
+- dest: /etc/network/interfaces
+- line: "source /etc/network/interfaces.d/*.cfg"
+- when:
+- - ansible_pkg_mgr == 'apt'
+- tags:
+- - networking-interfaces-load
+-
+-- name: Shut down the network interfaces
+- command: "ifdown {{ item.name }}"
+- when:
+- - osa_interfaces | changed or osa_multinode_interfaces | changed or network_interfaces_rhel | changed
+- - item.enabled | default(True)
+- with_items:
+- - { name: br-mgmt }
+- - { name: br-storage }
+- - { name: br-vlan }
+- - { name: br-vxlan }
+- - { name: br-dbaas, enabled: "{{ (bootstrap_host_scenario == 'translations') | bool }}" }
+- - { name: br-lbaas, enabled: "{{ (bootstrap_host_scenario in ['translations', 'octavia']) | bool }}" }
+- tags:
+- - networking-interfaces-stop
+-
+-- name: Shut down the encapsulation network interfaces
+- command: "ifdown {{ item.key }}"
+- when:
+- - osa_multinode_interfaces | changed
+- - bootstrap_host_encapsulation_enabled | bool
+- with_dict: "{{ bootstrap_host_encapsulation_interfaces }}"
+- tags:
+- - networking-interfaces-stop
+-
+-- name: Start the encapsulation network interfaces
+- command: "ifup {{ item.key }}"
+- when:
+- - osa_multinode_interfaces | changed
+- - bootstrap_host_encapsulation_enabled | bool
+- with_dict: "{{ bootstrap_host_encapsulation_interfaces }}"
+- tags:
+- - networking-interfaces-start
+-
+-- name: Start the network interfaces
+- command: "ifup {{ item.name }}"
+- when:
+- - osa_interfaces | changed or network_interfaces_rhel | changed or network_interfaces_suse | changed
+- - item.enabled | default(True)
+- with_items:
+- - { name: br-mgmt }
+- - { name: br-storage }
+- - { name: br-vlan }
+- - { name: br-vxlan }
+- - { name: br-dbaas, enabled: "{{ (bootstrap_host_scenario == 'translations') | bool }}" }
+- - { name: br-lbaas, enabled: "{{ (bootstrap_host_scenario in ['translations', 'octavia']) | bool }}" }
+- tags:
+- - networking-interfaces-start
+-
+-- name: Updating the facts due to net changes
+- setup:
+- filter: "ansible_br*"
+- tags:
+- - networking
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_octavia.yml b/tests/roles/bootstrap-host/tasks/prepare_octavia.yml
+deleted file mode 100644
+index 8051b479..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_octavia.yml
++++ /dev/null
+@@ -1,80 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-
+-- name: Install apt packages
+- apt:
+- pkg: "{{ item }}"
+- state: "present"
+- update_cache: yes
+- register: install_packages
+- until: install_packages|success
+- retries: 5
+- delay: 2
+- with_items:
+- - qemu
+- - uuid-runtime
+- - curl
+- - kpartx
+- - git
+-- name: Create Octavia tmp dir
+- file:
+- state: directory
+- path: "/var/lib/octavia"
+-- name: Set Octavia tmp dir
+- set_fact:
+- bootstrap_host_octavia_tmp: "/var/lib/octavia"
+-- name: Install pip requirements
+- pip:
+- name: "{{ item }}"
+- state: "present"
+- extra_args: "-c {{ pip_install_upper_constraints_proto }}://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?id={{ requirements_git_install_branch | regex_replace(' #.*$','') }}"
+- register: install_packages
+- until: install_packages|success
+- retries: 5
+- delay: 2
+- with_items:
+- - argparse
+- - "Babel>=1.3"
+- - dib-utils
+- - PyYAML
+- - diskimage-builder
+-- name: Clone Octavia
+- git:
+- repo: "https://git.openstack.org/openstack/octavia"
+- dest: "{{ bootstrap_host_octavia_tmp }}/octavia"
+- version: "{{ octavia_git_install_branch }}"
+-# Build Octavia amphora image
+-- name: Create amphora image
+- shell: "./diskimage-create.sh -o {{ bootstrap_host_octavia_tmp }}/amphora-x64-haproxy.qcow2"
+- args:
+- chdir: "{{ bootstrap_host_octavia_tmp }}/octavia/diskimage-create"
+- creates: "{{ bootstrap_host_octavia_tmp }}/amphora-x64-haproxy.qcow2"
+- tags:
+- - skip_ansible_lint
+-- name: Change permission
+- file:
+- path: "{{ bootstrap_host_octavia_tmp }}/octavia/bin/create_certificates.sh"
+- mode: 0755
+-- name: Generate certs
+- shell: "{{ bootstrap_host_octavia_tmp }}/octavia/bin/create_certificates.sh {{ bootstrap_host_octavia_tmp }}/certs {{ bootstrap_host_octavia_tmp }}/octavia/etc/certificates/openssl.cnf"
+- args:
+- creates: "{{ bootstrap_host_octavia_tmp }}/certs/ca_01.pem"
+- tags:
+- - skip_ansible_lint
+-- name: Fix certs/private directory access
+- file:
+- path: "{{ bootstrap_host_octavia_tmp }}/certs/private"
+- mode: 0755
+diff --git a/tests/roles/bootstrap-host/tasks/prepare_ssh_keys.yml b/tests/roles/bootstrap-host/tasks/prepare_ssh_keys.yml
+deleted file mode 100644
+index e3f8914a..00000000
+--- a/tests/roles/bootstrap-host/tasks/prepare_ssh_keys.yml
++++ /dev/null
+@@ -1,75 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-- name: Ensure root has a .ssh directory
+- file:
+- path: /root/.ssh
+- state: directory
+- owner: root
+- group: root
+- mode: 0700
+- tags:
+- - ssh-key-dir
+-
+-- name: Check for existing ssh private key file
+- stat:
+- path: /root/.ssh/id_rsa
+- register: ssh_key_private
+- tags:
+- - ssh-key-check
+-
+-- name: Check for existing ssh public key file
+- stat:
+- path: /root/.ssh/id_rsa.pub
+- register: ssh_key_public
+- tags:
+- - ssh-key-check
+-
+-- name: Remove an existing private/public ssh keys if one is missing
+- file:
+- path: "/root/.ssh/{{ item }}"
+- state: absent
+- when: not ssh_key_public.stat.exists or not ssh_key_private.stat.exists
+- with_items:
+- - 'id_rsa'
+- - 'id_rsa.pub'
+- tags:
+- - ssh-key-clean
+-
+-- name: Create ssh key pair for root
+- user:
+- name: root
+- generate_ssh_key: yes
+- ssh_key_bits: 2048
+- ssh_key_file: /root/.ssh/id_rsa
+- tags:
+- - ssh-key-generate
+-
+-- name: Fetch the generated public ssh key
+- fetch:
+- src: "/root/.ssh/id_rsa.pub"
+- dest: "/tmp/id_rsa.pub"
+- flat: yes
+- when: inventory_hostname == groups['all'][0]
+- tags:
+- - ssh-key-authorized
+-
+-- name: Ensure root's new public ssh key is in authorized_keys
+- authorized_key:
+- user: root
+- key: "{{ lookup('file','/tmp/id_rsa.pub') }}"
+- manage_dir: no
+- tags:
+- - ssh-key-authorized
+\ No newline at end of file
+diff --git a/tests/roles/bootstrap-host/templates/osa_interfaces.cfg.j2 b/tests/roles/bootstrap-host/templates/osa_interfaces.cfg.j2
+deleted file mode 100644
+index bc21b90e..00000000
+--- a/tests/roles/bootstrap-host/templates/osa_interfaces.cfg.j2
++++ /dev/null
+@@ -1,107 +0,0 @@
+-## The default networking requires several bridges. These bridges were named to be informative
+-## however they can be named what ever you like and is adaptable to any network infrastructure
+-## environment. This file serves as an example of how to setup basic networking and was ONLY
+-## built for the purpose of being an example and used expressly in the building of an ALL IN
+-## ONE development environment.
+-
+-auto br-mgmt
+-iface br-mgmt inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- # Notice the bridge port is the vlan tagged interface
+- bridge_ports {{ bootstrap_host_bridge_mgmt_ports }}
+- address 172.29.236.100
+- netmask 255.255.252.0
+- offload-sg off
+-
+-auto br-vxlan
+-iface br-vxlan inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- bridge_ports {{ bootstrap_host_bridge_vxlan_ports }}
+- address 172.29.240.100
+- netmask 255.255.252.0
+- offload-sg off
+- # To ensure ssh checksum is correct
+- up /sbin/iptables -A POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill
+- down /sbin/iptables -D POSTROUTING -t mangle -p tcp --dport 22 -j CHECKSUM --checksum-fill
+- # To provide internet connectivity to instances
+- up /sbin/iptables -t nat -A POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE
+- down /sbin/iptables -t nat -D POSTROUTING -o {{ bootstrap_host_public_interface }} -j MASQUERADE
+-
+-auto br-storage
+-iface br-storage inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- bridge_ports {{ bootstrap_host_bridge_storage_ports }}
+- address 172.29.244.100
+- netmask 255.255.252.0
+- offload-sg off
+-
+-auto br-vlan
+-iface br-vlan inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- address 172.29.248.100
+- netmask 255.255.252.0
+- offload-sg off
+- # Create veth pair, don't bomb if already exists
+- pre-up ip link add br-vlan-veth type veth peer name eth12 || true
+- # Set both ends UP
+- pre-up ip link set br-vlan-veth up
+- pre-up ip link set eth12 up
+- # Delete veth pair on DOWN
+- post-down ip link del br-vlan-veth || true
+- bridge_ports br-vlan-veth
+-
+-{% if bootstrap_host_scenario == "translations" %}
+-auto br-dbaas
+-iface br-dbaas inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- address 172.29.232.100
+- netmask 255.255.252.0
+- offload-sg off
+- # Create veth pair, don't bomb if already exists
+- pre-up ip link add br-dbaas-veth type veth peer name eth13 || true
+- # Set both ends UP
+- pre-up ip link set br-dbaas-veth up
+- pre-up ip link set eth13 up
+- # Delete veth pair on DOWN
+- post-down ip link del br-dbaas-veth || true
+- bridge_ports br-dbaas-veth
+-
+-{% endif %}
+-{% if bootstrap_host_scenario in ["translations", "octavia"] %}
+-auto br-lbaas
+-iface br-lbaas inet static
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- address 172.29.252.100
+- netmask 255.255.252.0
+- offload-sg off
+- # Create veth pair, don't bomb if already exists
+- pre-up ip link add br-lbaas-veth type veth peer name eth14 || true
+- # Set both ends UP
+- pre-up ip link set br-lbaas-veth up
+- pre-up ip link set eth14 up
+- # Delete veth pair on DOWN
+- post-down ip link del br-lbaas-veth || true
+- bridge_ports br-lbaas-veth
+-
+-{% endif %}
+-
+-# Add an additional address to br-vlan
+-iface br-vlan inet static
+- # Flat network default gateway
+- # -- This needs to exist somewhere for network reachability
+- # -- from the router namespace for floating IP paths.
+- # -- Putting this here is primarily for tempest to work.
+- address 172.29.248.1
+- netmask 255.255.252.0
+diff --git a/tests/roles/bootstrap-host/templates/osa_interfaces_multinode.cfg.j2 b/tests/roles/bootstrap-host/templates/osa_interfaces_multinode.cfg.j2
+deleted file mode 100644
+index c31c4a43..00000000
+--- a/tests/roles/bootstrap-host/templates/osa_interfaces_multinode.cfg.j2
++++ /dev/null
+@@ -1,28 +0,0 @@
+-{% if bootstrap_host_encapsulation_enabled | bool %}
+-{% for nic_name, nic_details in bootstrap_host_encapsulation_interfaces.items() %}
+-# {{ nic_details.friendly_name }}
+-auto {{ nic_name }}
+-iface {{ nic_name }} inet manual
+- pre-up ip link add {{ nic_name }} type vxlan id {{ nic_details.id }} group 239.0.0.{{ nic_details.id }} dev {{ nic_details.underlay_device }} || true
+- up ip link set $IFACE up
+- down ip link set $IFACE down
+- post-down ip link del {{ nic_name }} || true
+-
+-{% endfor %}
+-{% endif %}
+-{%- for nic_name, nic_details in bootstrap_host_bridges_interfaces.items() -%}
+-auto {{ nic_name }}
+-iface {{ nic_name }} inet {{ nic_details.mode | default('static') }}
+- bridge_stp off
+- bridge_waitport 0
+- bridge_fd 0
+- bridge_ports {{ nic_details.ports }}
+- offload-sg {{ nic_details.offload_sg | default('off') }}
+- {% if nic_details.mode | default('static') == 'static' -%}
+- address {{ nic_details.ip_address_range }}.{{ node_id }}
+- netmask {{ nic_details.ip_netmask }}
+- {% endif %}
+- {%- if nic_details.state_change_scripts is defined %}{{ nic_details.state_change_scripts }}
+- {% endif %}
+-
+-{% endfor %}
+diff --git a/tests/roles/bootstrap-host/templates/redhat_interface_alias.cfg.j2 b/tests/roles/bootstrap-host/templates/redhat_interface_alias.cfg.j2
+deleted file mode 100644
+index 79a04a85..00000000
+--- a/tests/roles/bootstrap-host/templates/redhat_interface_alias.cfg.j2
++++ /dev/null
+@@ -1,5 +0,0 @@
+-# This interface is an alias
+-DEVICE={{ item.name | default('br-mgmt') }}:0
+-IPADDR={{ item.alias | default('10.1.0.1') }}
+-NETMASK={{ item.netmask | default('255.255.255.0') }}
+-ONBOOT=yes
+diff --git a/tests/roles/bootstrap-host/templates/redhat_interface_default.cfg.j2 b/tests/roles/bootstrap-host/templates/redhat_interface_default.cfg.j2
+deleted file mode 100644
+index 25c2dfac..00000000
+--- a/tests/roles/bootstrap-host/templates/redhat_interface_default.cfg.j2
++++ /dev/null
+@@ -1,12 +0,0 @@
+-{% if item.veth_peer is defined %}
+-# This interface has a veth peer
+-{% endif %}
+-DEVICE={{ item.name | default('br-mgmt') }}
+-TYPE=Bridge
+-IPADDR={{ item.ip_addr | default('10.1.0.1') }}
+-NETMASK={{ item.netmask | default('255.255.255.0') }}
+-ONBOOT=yes
+-BOOTPROTO=none
+-NM_CONTROLLED=no
+-DELAY=0
+-ETHTOOL_OPTS="-K ${DEVICE} sg off"
+diff --git a/tests/roles/bootstrap-host/templates/rpm_interface_ifdown-post.cfg.j2 b/tests/roles/bootstrap-host/templates/rpm_interface_ifdown-post.cfg.j2
+deleted file mode 100644
+index e35945df..00000000
+--- a/tests/roles/bootstrap-host/templates/rpm_interface_ifdown-post.cfg.j2
++++ /dev/null
+@@ -1,29 +0,0 @@
+-#!/usr/bin/env bash
+-# Copyright 2014, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-source /etc/os-release || source /usr/lib/os-release
+-
+-case "${ID}" in
+- *suse*) INTERFACE="${1}"; ;;
+- centos|rhel|fedora) INTERFACE="${DEVICE}"; ;;
+- *) echo "Unsupported distribution ${ID}"; exit 1;
+-esac
+-
+-_ip=$(which ip 2>/dev/null || { echo "Failed to find ip executable"; exit 1; })
+-
+-if [ "${INTERFACE}" == "{{ item[1].name | default('br-mgmt') }}" ]; then
+- eval $_ip link set {{ item[1].name | default('br-mgmt') }}-veth nomaster || true
+- eval $_ip link del {{ item[1].name | default('br-mgmt') }}-veth || true
+-fi
+diff --git a/tests/roles/bootstrap-host/templates/rpm_interface_ifup-post.cfg.j2 b/tests/roles/bootstrap-host/templates/rpm_interface_ifup-post.cfg.j2
+deleted file mode 100644
+index 807b1cae..00000000
+--- a/tests/roles/bootstrap-host/templates/rpm_interface_ifup-post.cfg.j2
++++ /dev/null
+@@ -1,35 +0,0 @@
+-#!/usr/bin/env bash
+-# Copyright 2014, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-source /etc/os-release || source /usr/lib/os-release
+-
+-case "${ID}" in
+- *suse*) INTERFACE="${1}"; ;;
+- centos|rhel|fedora) INTERFACE="${DEVICE}"; ;;
+- *) echo "Unsupported distribution ${ID}"; exit 1;
+-esac
+-
+-_ip=$(which ip 2>/dev/null || { echo "Failed to find ip executable"; exit 1; })
+-
+-if [ "${INTERFACE}" == "{{ item[1].name | default('br-mgmt') }}" ]; then
+- # Create veth pair, don't bomb if already exists
+- echo "Creating veth"
+- eval $_ip link add {{ item[1].name | default('br-mgmt') }}-veth type veth peer name {{ item[1].veth_peer | default('eth0') }} || true
+- # Set both ends UP
+- eval $_ip link set {{ item[1].name | default('br-mgmt') }}-veth up || true
+- eval $_ip link set {{ item[1].veth_peer | default('eth0') }} up || true
+- # add eth12 to the bridge
+- eval $_ip link set {{ item[1].name | default('br-mgmt') }}-veth master {{ item[1].name | default('br-mgmt') }} || true
+-fi
+diff --git a/tests/roles/bootstrap-host/templates/suse_interface_default.cfg.j2 b/tests/roles/bootstrap-host/templates/suse_interface_default.cfg.j2
+deleted file mode 100644
+index f0cc3770..00000000
+--- a/tests/roles/bootstrap-host/templates/suse_interface_default.cfg.j2
++++ /dev/null
+@@ -1,9 +0,0 @@
+-{% if item.veth_peer is defined %}
+-# This interface has a veth peer
+-{% endif %}
+-BRIDGE='yes'
+-IPADDR={{ item.ip_addr | default('10.1.0.1') }}
+-NETMASK={{ item.netmask | default('255.255.255.0') }}
+-STARTMODE='auto'
+-BOOTPROTO='static'
+-ETHTOOL_OPTIONS_sg='-K iface sg off'
+diff --git a/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2 b/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
+deleted file mode 100644
+index d0489ea4..00000000
+--- a/tests/roles/bootstrap-host/templates/user_variables.aio.yml.j2
++++ /dev/null
+@@ -1,196 +0,0 @@
+----
+-# Copyright 2014, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-## General options
+-debug: True
+-
+-## Tempest settings
+-tempest_public_subnet_cidr: 172.29.248.0/22
+-tempest_public_subnet_allocation_pools: "172.29.249.110-172.29.249.200"
+-
+-## Galera settings
+-galera_innodb_buffer_pool_size: 16M
+-galera_innodb_log_buffer_size: 4M
+-galera_wsrep_provider_options:
+- - { option: "gcache.size", value: "4M" }
+-
+-## Neutron settings
+-neutron_metadata_checksum_fix: True
+-
+-### Set workers for all services to optimise memory usage
+-
+-## Repo
+-repo_nginx_threads: 2
+-
+-## Keystone
+-keystone_httpd_mpm_start_servers: 2
+-keystone_httpd_mpm_min_spare_threads: 1
+-keystone_httpd_mpm_max_spare_threads: 2
+-keystone_httpd_mpm_thread_limit: 2
+-keystone_httpd_mpm_thread_child: 1
+-keystone_wsgi_threads: 1
+-keystone_wsgi_processes_max: 2
+-
+-## Barbican
+-barbican_wsgi_processes: 2
+-barbican_wsgi_threads: 1
+-
+-## Cinder
+-cinder_wsgi_processes_max: 2
+-cinder_wsgi_threads: 1
+-cinder_wsgi_buffer_size: 16384
+-cinder_osapi_volume_workers_max: 2
+-
+-## Glance
+-glance_api_threads_max: 2
+-glance_api_threads: 1
+-glance_api_workers: 1
+-glance_registry_workers: 1
+-
+-## Nova
+-nova_wsgi_threads: 1
+-nova_wsgi_processes_max: 2
+-nova_wsgi_processes: 2
+-nova_wsgi_buffer_size: 16384
+-nova_api_threads_max: 2
+-nova_api_threads: 1
+-nova_osapi_compute_workers: 1
+-nova_conductor_workers: 1
+-nova_metadata_workers: 1
+-
+-## Neutron
+-neutron_rpc_workers: 1
+-neutron_metadata_workers: 1
+-neutron_api_workers: 1
+-neutron_api_threads_max: 2
+-neutron_api_threads: 2
+-neutron_num_sync_threads: 1
+-
+-## Heat
+-heat_api_workers: 1
+-heat_api_threads_max: 2
+-heat_api_threads: 1
+-heat_wsgi_threads: 1
+-heat_wsgi_processes_max: 2
+-heat_wsgi_processes: 1
+-heat_wsgi_buffer_size: 16384
+-
+-## Horizon
+-horizon_wsgi_processes: 1
+-horizon_wsgi_threads: 1
+-horizon_wsgi_threads_max: 2
+-
+-## Ceilometer
+-ceilometer_notification_workers_max: 2
+-ceilometer_notification_workers: 1
+-
+-## AODH
+-aodh_wsgi_threads: 1
+-aodh_wsgi_processes_max: 2
+-aodh_wsgi_processes: 1
+-
+-## Gnocchi
+-gnocchi_wsgi_threads: 1
+-gnocchi_wsgi_processes_max: 2
+-gnocchi_wsgi_processes: 1
+-
+-## Swift
+-swift_account_server_replicator_workers: 1
+-swift_server_replicator_workers: 1
+-swift_object_replicator_workers: 1
+-swift_account_server_workers: 1
+-swift_container_server_workers: 1
+-swift_object_server_workers: 1
+-swift_proxy_server_workers_max: 2
+-swift_proxy_server_workers_not_capped: 1
+-swift_proxy_server_workers_capped: 1
+-swift_proxy_server_workers: 1
+-
+-## Ironic
+-ironic_wsgi_threads: 1
+-ironic_wsgi_processes_max: 2
+-ironic_wsgi_processes: 1
+-
+-## Trove
+-trove_api_workers_max: 2
+-trove_api_workers: 1
+-trove_conductor_workers_max: 2
+-trove_conductor_workers: 1
+-trove_wsgi_threads: 1
+-trove_wsgi_processes_max: 2
+-trove_wsgi_processes: 1
+-
+-## Sahara
+-sahara_api_workers_max: 2
+-sahara_api_workers: 1
+-
+-# NOTE: hpcloud-b4's eth0 uses 10.0.3.0/24, which overlaps with the
+-# lxc_net_address default
+-# TODO: We'll need to implement a mechanism to determine valid lxc_net_address
+-# value which will not overlap with an IP already assigned to the host.
+-lxc_net_address: 10.255.255.1
+-lxc_net_netmask: 255.255.255.0
+-lxc_net_dhcp_range: 10.255.255.2,10.255.255.253
+-
+-{% if repo_build_pip_extra_indexes is defined and repo_build_pip_extra_indexes|length > 0 %}
+-## Wheel mirrors for the repo_build to use
+-repo_build_pip_extra_indexes:
+-{{ repo_build_pip_extra_indexes | to_nice_yaml }}
+-{% endif %}
+-
+-{% if _lxc_mirror is defined and _lxc_mirror.stdout_lines is defined %}
+-## images.linuxcontainers.org reverse proxy
+-lxc_image_cache_server_mirrors:
+- - "http://{{ _lxc_mirror.stdout_lines[0] }}"
+-{% endif %}
+-
+-{% if cache_timeout is defined %}
+-## Package cache timeout
+-cache_timeout: {{ cache_timeout }}
+-{% endif %}
+-
+-# The container backing store is set to 'machinectl' to speed up the
+-# AIO build time. Options are: [machinectl, overlayfs, btrfs, zfs, dir, lvm]
+-lxc_container_backing_store: "machinectl"
+-
+-## Enable LBaaSv2 in the AIO
+-neutron_plugin_base:
+- - router
+- - metering
+- - neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
+-
+-## Always setup tempest, the resources for it, then execute tests
+-tempest_install: yes
+-tempest_run: yes
+-
+-{% if nodepool_dir.stat.exists %}
+-# Disable chronyd in OpenStack CI
+-security_rhel7_enable_chrony: no
+-{% endif %}
+-
+-# For testing purposes in public clouds, we need to ignore these
+-# services when trying to do a reload of nova services.
+-nova_service_negate:
+- - "nova-agent.service"
+- - "nova-resetnetwork.service"
+-
+-{% if _pypi_wheel_mirror is defined and _pypi_wheel_mirror.stdout_lines is defined %}
+-repo_nginx_pypi_upstream: "{{ _pypi_wheel_mirror.stdout_lines[0] | netloc }}"
+-repo_build_pip_extra_indexes:
+- - "{{ _pypi_wheel_mirror.stdout_lines[1] }}"
+-{% endif %}
+-
+-# Set the container tech. Options are "lxc"
+-container_tech: "{{ container_tech }}"
+diff --git a/tests/roles/bootstrap-host/templates/user_variables_ceph.yml.j2 b/tests/roles/bootstrap-host/templates/user_variables_ceph.yml.j2
+deleted file mode 100644
+index a9cb5dfc..00000000
+--- a/tests/roles/bootstrap-host/templates/user_variables_ceph.yml.j2
++++ /dev/null
+@@ -1,29 +0,0 @@
+----
+-# Copyright 2017, Logan Vig <logan2211@gmail.com>
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-## ceph-ansible AIO settings
+-common_single_host_mode: true
+-monitor_interface: eth1 # Management network in the AIO
+-public_network: "{{ (mgmt_range ~ '.0/' ~ netmask) | ipaddr('net') }}"
+-journal_size: 100
+-osd_scenario: collocated
+-pool_default_pg_num: 32
+-openstack_config: true # Ceph ansible automatically creates pools & keys
+-cinder_ceph_client: cinder
+-cinder_default_volume_type: RBD
+-glance_ceph_client: glance
+-glance_default_store: rbd
+-glance_rbd_store_pool: images
+-nova_libvirt_images_rbd_pool: vms
+diff --git a/tests/roles/bootstrap-host/templates/user_variables_octavia.yml.j2 b/tests/roles/bootstrap-host/templates/user_variables_octavia.yml.j2
+deleted file mode 100644
+index 0d737e76..00000000
+--- a/tests/roles/bootstrap-host/templates/user_variables_octavia.yml.j2
++++ /dev/null
+@@ -1,25 +0,0 @@
+----
+-# Copyright 2017, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-# Octavia specific stuff
+-octavia_system_home_folder: {{ bootstrap_host_octavia_tmp }}
+-neutron_lbaas_octavia: True
+-octavia_amp_image_file_name: {{ bootstrap_host_octavia_tmp }}/amphora-x64-haproxy.qcow2
+-octavia_amp_image_upload_enabled: True
+-octavia_glance_image_tag:
+-octavia_management_net_subnet_cidr: 172.29.252.0/22
+-
+-# make glance only use file
+-glance_default_store: file
+diff --git a/tests/roles/bootstrap-host/vars/redhat.yml b/tests/roles/bootstrap-host/vars/redhat.yml
+deleted file mode 100644
+index 905610ab..00000000
+--- a/tests/roles/bootstrap-host/vars/redhat.yml
++++ /dev/null
+@@ -1,39 +0,0 @@
+----
+-# Copyright 2017, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-rdo_package: "https://rdoproject.org/repos/openstack-pike/rdo-release-pike.rpm"
+-
+-packages_install:
+- - bridge-utils
+- - btrfs-progs
+- - curl
+- - dbus
+- - ethtool
+- - git
+- - iputils
+- - lvm2
+- - python
+- - python-devel
+- - sshpass
+- - tmux
+- - vim
+- - xfsprogs
+-
+-packages_remove: []
+-
+-swap_create_command: "dd if=/dev/zero of=/openstack/swap.img bs=1M count={{ (bootstrap_host_swap_size | int) * 1024 }}"
+-rc_local: /etc/rc.d/rc.local
+-rc_local_insert_before: "^touch /var/lock/subsys/local$"
+-
+diff --git a/tests/roles/bootstrap-host/vars/suse.yml b/tests/roles/bootstrap-host/vars/suse.yml
+deleted file mode 100644
+index 19488c5c..00000000
+--- a/tests/roles/bootstrap-host/vars/suse.yml
++++ /dev/null
+@@ -1,40 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-# Copyright 2017, SUSE LINUX GmbH.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-opensuse_openstack_repos:
+- - name: "OBS:Cloud:OpenStack:Pike"
+- uri: "http://download.opensuse.org/repositories/Cloud:/OpenStack:/Pike/openSUSE_Leap_{{ ansible_distribution_version }}"
+-
+-packages_install:
+- - bridge-utils
+- - btrfsprogs
+- - curl
+- - dbus-1
+- - ethtool
+- - git-core
+- - lvm2
+- - python
+- - python-devel
+- - tmux
+- - vim
+- - vlan
+- - xfsprogs
+-
+-packages_remove: []
+-
+-swap_create_command: "dd if=/dev/zero of=/openstack/swap.img bs=1M count={{ (bootstrap_host_swap_size | int) * 1024 }}"
+-rc_local: /etc/rc.d/boot.local
+-rc_local_insert_before: EOF
+diff --git a/tests/roles/bootstrap-host/vars/ubuntu.yml b/tests/roles/bootstrap-host/vars/ubuntu.yml
+deleted file mode 100644
+index 2f2d35a6..00000000
+--- a/tests/roles/bootstrap-host/vars/ubuntu.yml
++++ /dev/null
+@@ -1,45 +0,0 @@
+----
+-# Copyright 2015, Rackspace US, Inc.
+-#
+-# Licensed under the Apache License, Version 2.0 (the "License");
+-# you may not use this file except in compliance with the License.
+-# You may obtain a copy of the License at
+-#
+-# http://www.apache.org/licenses/LICENSE-2.0
+-#
+-# Unless required by applicable law or agreed to in writing, software
+-# distributed under the License is distributed on an "AS IS" BASIS,
+-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+-# See the License for the specific language governing permissions and
+-# limitations under the License.
+-
+-packages_install:
+- - apt-transport-https
+- - bridge-utils
+- - btrfs-tools
+- - build-essential
+- - curl
+- - dbus
+- - ethtool
+- - git-core
+- - iptables
+- - iputils-tracepath
+- - ipython
+- - linux-image-extra-{{ ansible_kernel }}
+- - lvm2
+- - parted
+- - python2.7
+- - python-dev
+- - sshpass
+- - tmux
+- - vim
+- - vlan
+- - xfsprogs
+-
+-packages_remove:
+- - libmysqlclient18
+- - mysql-common
+-
+-swap_create_command: "fallocate -l {{ bootstrap_host_swap_size }}G /openstack/swap.img"
+-rc_local: /etc/rc.local
+-rc_local_insert_before: "^exit 0$"
+diff --git a/tox.ini b/tox.ini
+deleted file mode 100644
+index 7daf9712..00000000
+--- a/tox.ini
++++ /dev/null
+@@ -1,160 +0,0 @@
+-[tox]
+-minversion = 2.0
+-skipsdist = True
+-envlist = linters,docs,releasenotes,inventory,py3-inventory
+-
+-
+-[testenv]
+-usedevelop = True
+-basepython = python2.7
+-install_command =
+- pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/queens} {opts} {packages}
+-deps =
+- -r{toxinidir}/global-requirement-pins.txt
+- -r{toxinidir}/test-requirements.txt
+-passenv =
+- HOME
+- http_proxy
+- HTTP_PROXY
+- https_proxy
+- HTTPS_PROXY
+- no_proxy
+- NO_PROXY
+-whitelist_externals =
+- bash
+-setenv =
+- PYTHONUNBUFFERED=1
+- PYTHONWARNINGS=default::DeprecationWarning
+- VIRTUAL_ENV={envdir}
+- WORKING_DIR={toxinidir}
+- ANSIBLE_EXTRA_ROLE_DIRS={toxinidir}/playbooks/roles:{homedir}/.ansible/roles/ceph-ansible/roles
+- ANSIBLE_ROLE_REQUIREMENTS_PATH={toxinidir}/ansible-role-requirements.yml
+- TEST_PLAYBOOK={toxinidir}/tests/bootstrap-aio.yml {toxinidir}/playbooks/setup-everything.yml
+- ANSIBLE_LINT_PARAMS=--exclude={homedir}/.ansible/roles
+-
+-
+-
+-[testenv:docs]
+-commands=
+- bash -c "rm -rf doc/build"
+- doc8 doc
+- python setup.py build_sphinx
+-
+-
+-
+-[testenv:deploy-guide]
+-commands = sphinx-build -a -E -W -d deploy-guide/build/doctrees -b html deploy-guide/source deploy-guide/build/html
+-
+-
+-
+-[doc8]
+-# Settings for doc8:
+-extensions = .rst
+-
+-
+-
+-[testenv:releasenotes]
+-commands =
+- sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
+-
+-
+-
+-# environment used by the -infra templated docs job
+-[testenv:venv]
+-commands =
+- {posargs}
+-
+-
+-
+-[testenv:pep8]
+-commands =
+- bash -c "{toxinidir}/tests/tests-repo-clone.sh"
+- bash -c "{toxinidir}/tests/common/test-pep8.sh"
+-
+-
+-
+-[flake8]
+-# Ignores the following rules due to how ansible modules work in general
+-# F403 'from ansible.module_utils.basic import *' used;
+-# unable to detect undefined names
+-ignore=F403
+-
+-
+-
+-[testenv:bashate]
+-commands =
+- bash -c "{toxinidir}/tests/tests-repo-clone.sh"
+- bash -c "{toxinidir}/tests/common/test-bashate.sh"
+-
+-
+-
+-# The deps URL should be set to the appropriate git URL.
+-# In the tests repo itself, the variable is uniquely set to
+-# the toxinidir so that the role is able to test itself, but
+-# the tox config is exactly the same as other repositories.
+-#
+-# The value for other repositories must be:
+-# http://git.openstack.org/cgit/openstack/openstack-ansible-tests/plain/test-ansible-deps.txt
+-# or for a stable branch:
+-# http://git.openstack.org/cgit/openstack/openstack-ansible-tests/plain/test-ansible-deps.txt?h=stable/newton
+-[testenv:ansible]
+-deps =
+- {[testenv]deps}
+- -r{toxinidir}/global-requirement-pins.txt
+- -rhttps://git.openstack.org/cgit/openstack/openstack-ansible-tests/plain/test-ansible-deps.txt?h=stable/queens
+-
+-
+-
+-[testenv:ansible-syntax]
+-deps =
+- {[testenv:ansible]deps}
+-commands =
+- bash -c "{toxinidir}/tests/tests-repo-clone.sh"
+- bash -c "{toxinidir}/tests/common/test-ansible-syntax.sh"
+-
+-
+-
+-[testenv:ansible-lint]
+-deps =
+- {[testenv:ansible]deps}
+-commands =
+- bash -c "{toxinidir}/tests/tests-repo-clone.sh"
+- bash -c "{toxinidir}/tests/common/test-ansible-lint.sh"
+-
+-
+-
+-[testenv:inventory]
+-# Use a fixed seed since some inventory tests rely on specific ordering
+-setenv =
+- {[testenv]setenv}
+- PYTHONHASHSEED = 100
+-commands =
+- coverage erase
+- coverage run -a {toxinidir}/tests/test_inventory.py
+- coverage run -a {toxinidir}/tests/test_manage.py
+- coverage run -a {toxinidir}/tests/test_dictutils.py
+- coverage run -a {toxinidir}/tests/test_ip.py
+- coverage run -a {toxinidir}/tests/test_filesystem.py
+- coverage report --show-missing --include={toxinidir}/inventory/*,{toxinidir}/osa_toolkit/*
+-
+-
+-
+-[testenv:py3-inventory]
+-basepython = python3.5
+-setenv =
+- {[testenv:inventory]setenv}
+-commands =
+- {[testenv:inventory]commands}
+-
+-
+-
+-[testenv:linters]
+-deps =
+- {[testenv:ansible]deps}
+-commands =
+- {[testenv:pep8]commands}
+- {[testenv:bashate]commands}
+- {[testenv:ansible-lint]commands}
+- {[testenv:ansible-syntax]commands}
+- {[testenv:inventory]commands}
+- {[testenv:docs]commands}