# Create a libvirt volume pool. This is where we'll be creating
# images for the VMs.
# Note: the virt_pool module is not working properly on rhel-7.2
# https://bugs.launchpad.net/tripleo-quickstart/+bug/1597905
- name: ensure libvirt volume path exists
  become: true
  file:
    path: "{{ libvirt_volume_path }}"
    state: directory
    mode: "0755"

# A failure here means the pool does not exist yet and must be defined.
- name: Check volume pool
  command: >
    virsh pool-uuid "{{ libvirt_volume_pool }}"
  register: pool_check
  ignore_errors: true
  changed_when: false
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: create the volume pool xml file
  template:
    src: volume_pool.xml.j2
    dest: "{{ working_dir }}/volume_pool.xml"
  when: pool_check is failed

- name: Define volume pool
  command: "virsh pool-define {{ working_dir }}/volume_pool.xml"
  when: pool_check is failed
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Start volume pool
  virt_pool:
    command: start
    state: active
    name: "{{ libvirt_volume_pool }}"
    uri: "{{ libvirt_uri }}"

# In some cases pool_check can pass even though the pool XML config is
# absent. In that case the XML must be dumped and the pool redefined.
- name: ensure tripleo-quickstart volume pool is defined
  shell: >
    virsh pool-dumpxml {{ libvirt_volume_pool }} |
    virsh pool-define /dev/stdin
  changed_when: true
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"

- name: Mark volume pool for autostart
  virt_pool:
    name: "{{ libvirt_volume_pool }}"
    autostart: "yes"
    uri: "{{ libvirt_uri }}"

- when: vm_nodes
  environment:
    LIBVIRT_DEFAULT_URI: "{{ libvirt_uri }}"
  block:

    # Create libvirt volumes for the vm hosts.
    - name: Check if vm volumes exist
      command: >
        virsh vol-info --pool '{{ libvirt_volume_pool }}'
        '{{ item.name }}.qcow2'
      register: vm_vol_check
      ignore_errors: true
      with_items: "{{ vm_nodes }}"

    - name: Create vm storage
      command: >
        virsh vol-create-as '{{ libvirt_volume_pool }}'
        '{{ item.item.name }}'.qcow2
        '{{ flavors[item.item.flavor].disk }}'G
        --format qcow2
      when: item is failed
      with_items: "{{ vm_vol_check.results }}"

    # Define (but do not start) the vm nodes. These will be
    # booted later by ironic during the provisioning process.
    - name: Define vm vms
      virt:
        name: "{{ item.name }}"
        command: define
        xml: "{{ lookup('template', 'baremetalvm.xml.j2') }}"
        uri: "{{ libvirt_uri }}"
      with_items: "{{ vm_nodes }}"

    # Create additional blockdevices for each objectstorage flavor node.
    # These are sparse files, so they consume no space until written to.
    - name: Create additional blockdevice for objectstorage nodes
      command: >
        dd if=/dev/zero
        of={{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img
        bs=1 count=0 seek={{ extradisks_size }}
      when: flavors[item[0].flavor].extradisks|default(false)
      with_nested:
        - "{{ vm_nodes }}"
        - "{{ extradisks_list }}"

    - name: Check if additional blockdevices are attached
      command: >
        virsh domblkinfo {{ item[0].name }}
        {{ libvirt_volume_path }}/{{ item[0].name }}_{{ item[1] }}.img
      when: flavors[item[0].flavor].extradisks|default(false)
      changed_when: false
      ignore_errors: true
      register: vm_extradisks_check
      with_nested:
        - "{{ vm_nodes }}"
        - "{{ extradisks_list }}"

    - name: Attach additional blockdevices to vm objectstorage VMs
      command: >
        virsh attach-disk --config {{ item.item[0].name }}
        {{ libvirt_volume_path }}/{{ item.item[0].name }}_{{ item.item[1] }}.img
        {{ item.item[1] }}
      when: item is failed
      with_items: "{{ vm_extradisks_check.results }}"
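
# The `dd ... bs=1 count=0 seek=N` invocation above only sets the file
# length, so the image starts fully sparse. An illustrative way to confirm
# this on the virthost (not part of the role; <node>_<disk>.img stands in
# for a real image name):
#
#   du -h --apparent-size <libvirt_volume_path>/<node>_<disk>.img   # logical size
#   du -h <libvirt_volume_path>/<node>_<disk>.img                   # allocated size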
# Generate the ironic node inventory files. Note that this
# task *must* occur after the above vm tasks, because if
# `vm_nodes` is defined the template depends on the
# `node_mac_map` variable.
- name: Write ironic node json files
  template:
    src: ../templates/ironic_nodes.json.j2
    dest: "{{ working_dir }}/ironic_nodes.json"
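
# A follow-up task along these lines could verify that the rendered
# inventory parses as JSON. This is an illustrative sketch, not part of
# the role; it assumes a python interpreter on the target host:
#
# - name: Verify ironic_nodes.json parses as JSON
#   command: >
#     python -c 'import json, sys; json.load(open(sys.argv[1]))'
#     {{ working_dir }}/ironic_nodes.json
#   changed_when: false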