# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,too-many-instance-attributes,too-many-lines

import json
import math
import os
import subprocess

from jinja2 import Environment

from cmframework.apis import cmansibleinventoryconfig
from cmframework.apis import cmerror
from cmdatahandlers.api import configerror
from serviceprofiles import profiles
import hw_detector.hw_detect_lib as hw

# Floor for every calculated pg_num (the exact value is an assumption in
# this excerpt; the constant is referenced by PGNum below).
MINIMUM_PG_NUM = 32
NEAREST_POWER_OF_2_PERCENTAGE = 0.25

TARGET_PGS_PER_OSD_NO_INCREASE_EXPECTED = 100
TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED = 200
TARGET_PGS_PER_OSD_TWO_TO_THREE_TIMES_SIZE_INCREASE_EXPECTED = 300
# Please visit ceph.com/pgcalc for details on the values above


class PGNum(object):
    """Calculates the pg_num for the given attributes."""

    def __init__(self, number_of_pool_osds, pool_data_percentage, number_of_replicas):
        self._number_of_pool_osds = number_of_pool_osds
        self._pool_data_percentage = pool_data_percentage
        self._number_of_replicas = number_of_replicas

    @staticmethod
    def _round_up_to_closest_power_of_2(num):
        """Return the smallest power of 2 greater than or equal to num."""
        return 2**(num-1).bit_length() if num > 0 else 1

    @staticmethod
    def _round_down_to_closest_power_of_2(num):
        """Return the largest power of 2 less than or equal to num."""
        return 2**(num.bit_length()-1) if num > 0 else 1
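
    # Example (illustrative): for num=6, (6-1).bit_length() == 3, so the
    # round-up helper returns 2**3 == 8, while 6.bit_length() - 1 == 2 gives
    # 2**2 == 4 for the round-down helper, bracketing 6 between 4 and 8.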

    @staticmethod
    def _check_percentage_of_values(diff_to_lower, org_pgnum):
        """If the nearest power of 2 is more than 25% below the original
        value, the next higher power of 2 is used. Please visit
        ceph.com/pgcalc for details.
        """
        return float(diff_to_lower) / float(org_pgnum) > NEAREST_POWER_OF_2_PERCENTAGE

    def _rounded_pgnum_to_the_nearest_power_of_2(self, pgnum):
        higher_power = self._round_up_to_closest_power_of_2(pgnum)
        lower_power = self._round_down_to_closest_power_of_2(pgnum)
        diff_to_lower = pgnum - lower_power
        if pgnum != 0 and self._check_percentage_of_values(diff_to_lower, pgnum):
            return higher_power
        return lower_power

    def _calculate_pg_num_formula(self, number_of_pool_osds, pool_percentage):
        return TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED \
            * number_of_pool_osds * float(pool_percentage) / self._number_of_replicas

    def _select_pgnum_formula_result(self, number_of_pool_osds, pool_percentage):
        pgnum = self._calculate_pg_num_formula(number_of_pool_osds, pool_percentage)
        return int(math.ceil(max(pgnum, MINIMUM_PG_NUM)))

    def calculate(self):
        """The formula of the calculation can be found at ceph.com/pgcalc:

        pgnum = (target_pgs x number_of_osds_in_pool x pool_percentage) / number_of_replicas

        return: pgnum rounded to the nearest power of 2
        """
        pgnum = self._select_pgnum_formula_result(
            self._number_of_pool_osds, self._pool_data_percentage)
        return self._rounded_pgnum_to_the_nearest_power_of_2(pgnum)
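

# Worked example (illustrative only): with 9 OSDs, a 69% data share and
# 3 replicas, the raw formula gives 200 * 9 * 0.69 / 3 = 414. The lower
# power of 2 (256) is more than 25% below 414 (158/414 ~ 0.38), so
# PGNum(9, 0.69, 3).calculate() returns the higher power, 512.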


SUPPORTED_INSTANCE_BACKENDS = ['default', 'cow', 'lvm']
ALL_DEFAULT_INSTANCE_BACKENDS = SUPPORTED_INSTANCE_BACKENDS + ['rbd']

DEFAULT_INSTANCE_LV_PERCENTAGE = "100"

USER_SECRETS = "/etc/openstack_deploy/user_secrets.yml"

# Ceph PG share percentages for OpenStack pools (the four shares sum to 1.0)
OSD_POOL_IMAGES_PG_NUM_PERCENTAGE = 0.09
OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE = 0.69
OSD_POOL_VMS_PG_NUM_PERCENTAGE = 0.20
OSD_POOL_SHARED_PG_NUM_PERCENTAGE = 0.02
# Ceph PG share percentage for CaaS pools
OSD_POOL_CAAS_PG_NUM_PERCENTAGE = 1.0

# Number of Ceph pools used for the default pg_num calculation
# (the value is an assumption in this excerpt; see _number_of_pools below)
NUMBER_OF_POOLS = 4

DEFAULT_ROOTDISK_DEVICE = "/dev/sda"
# root disk partition 2 system volume group VG percentages
INSTANCE_NODE_VG_PERCENTAGE = 0.47
NOT_INSTANCE_NODE_VG_PERCENTAGE = 1
"""
/dev/sda1 fixed partition size : 50GiB fixed size = 10% of the total disk size
/dev/sda2 system VG partition size : 47% of remaining total disk size = 42% of total disk size
/dev/sda3 instance partition size : 53% of remaining total disk size = 47% of total disk size
"""
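
# Sanity check for the percentages above (assuming a 500 GiB disk, so the
# 50 GiB fixed partition is 10% of the total): the remaining 90% is split
# 47/53, i.e. 0.47 * 0.90 = 0.423 ~ 42% and 0.53 * 0.90 = 0.477 ~ 47%
# of the total disk.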


JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "ext_ceph_user": "{{ ext_ceph_user }}",
        "ext_ceph_user_key": "{{ ext_ceph_user_key }}",
        "cephkeys_access_group": "cephkeys",
        "ceph_mons": [
            {% for host in hosts %}
            "{{ host.name }}"
            {% if not loop.last %},{% endif %}
            {% endfor %}
        ],
        "ext_ceph_fsid": "{{ ext_ceph_fsid }}",
        "ext_ceph_mon_hosts": "{{ ext_ceph_mon_hosts }}",
        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "RBD",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "{{ ext_ceph_user }}",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
        },
        "ext_openstack_pools": [
            "{{ glance_pool_name }}",
            "{{ cinder_pool_name }}",
            "{{ nova_pool_name }}",
            "{{ platform_pool_name }}"
        ],
        "cinder_ceph_client": "{{ ext_ceph_user }}",
        "nova_ceph_client": "{{ ext_ceph_user }}",
        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "glance_ceph_client": "{{ ext_ceph_user }}",
        "ceph_conf": "/etc/ceph/ceph.conf"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_CINDER_BACKENDS_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_controller %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            {% if openstack_storage == 'ceph' %}
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "volumes_hdd",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "cinder",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
            {% endif %}
            {% if openstack_storage == 'lvm' %}
            "lvm": {
                "iscsi_ip_address": "{{ installation_controller_ip }}",
                "volume_backend_name": "LVM_iSCSI",
                "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
                "volume_group": "cinder-volumes"
            }
            {% endif %}
        }
    }
    {% endif %}
    {% endfor %}
}
"""


JSON_STORAGE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_rbd_ceph %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
        "devices": [
            {% for disk in host.ceph_osd_disks %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}{% endfor %}]
    }
    {% endif %}
    {% endfor %}
}
"""


JSON_STORAGE_HOST_DISK_CONFIGURATION = """
{
    {% for host in hosts %}
    "{{ host.name }}":
        { "os" : "{{ host.os_disk }}",
          "osd" : "{{ host.ceph_osd_disks }}",
          "osd_disks_ids" : "{{ host.osd_disks_ids }}",
          "rootdisk_vg_percentage": "{{ host.vg_percentage }}",
          "default_rootdisk_device": "{{ rootdisk_device }}"
        } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "cinder_disks": [
            {% for disk in host.cinder_disks %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}{% endfor %}],
        "cinder_physical_volumes": [
            {% for disk in host.cinder_physical_volumes %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}{% endfor %}]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_BARE_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    {% if host.is_bare_lvm %}
    "{{ host.name }}": {
        "disks": [
            {% for disk in host.bare_lvm_disks %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}{% endfor %}],
        "physical_volumes": [
            {% for disk in host.bare_lvm_physical_volumes %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}{% endfor %}],
        "mount_options": "{{ host.mount_options }}",
        "mount_dir": "{{ host.mount_dir }}",
        "name": "{{ host.bare_lvm_lv_name }}"
    } {% if not loop.last %},{% endif %}
    {% endif %}
    {% endfor %}
}
"""


JSON_DEVICE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.instance_physical_volumes %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
        "instance_disks": [
            {% for disk in host.instance_disks %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}
            {% endfor %}],
        "instance_physical_volumes": [
            {% for disk in host.instance_physical_volumes %}
            "{{ disk }}"
            {%if not loop.last %},{% endif %}
            {% endfor %}],
        "instance_lv_percentage": "{{ host.instance_lv_percentage }}"
    }
    {% endif %}
    {% endfor %}
}
"""


# /etc/ansible/roles/os_nova/templates/nova.conf.j2
JSON_NOVA_RBD_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "nova_libvirt_images_rbd_pool": "{{ nova_pool_name }}",
        "nova_ceph_client": "{{ nova_ceph_client }}"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
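
# How the JSON_* templates above are consumed (illustrative sketch; see
# _template_and_add_vars_to_hosts below): each template is rendered with
# jinja2 and the resulting JSON is parsed into per-host variables, e.g.
#
#     text = Environment().from_string(JSON_NOVA_RBD_HOST_VAR).render(
#         hosts=hosts, nova_pool_name='vms', nova_ceph_client='cinder')
#     host_vars = json.loads(text)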


# /opt/ceph-ansible/group_vars/osds.yml
JSON_OVERRIDE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_min_down_reporters": "1",
            "mon_osd_adjust_heartbeat_grace": "false",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_osd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""


JSON_OVERRIDE_CACHE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_osd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""


JSON_OVERRIDE_3CONTROLLERS = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_election_timeout": "2",
            "mon_lease_renew_interval_factor": "0.4",
            "mon_lease_ack_timeout_factor": "1.5",
            "mon_timecheck_interval": "60",
            "mon_osd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""


JSON_NETWORK = """
{
    "public_network": "{{ public_networks }}",
    "cluster_network": "{{ cluster_networks }}"
}
"""

# (the vm.min_free_kbytes value below is an assumed placeholder; the
# original value is elided in this excerpt)
JSON_OS_TUNING = """
{
    "os_tuning_params": [{
        "name": "vm.min_free_kbytes",
        "value": "1048576"
    }]
}
"""


JSON_OSD_POOL_PGNUMS = """
{
    "osd_pool_images_pg_num": "{{ osd_pool_images_pg_num }}",
    "osd_pool_volumes_pg_num": "{{ osd_pool_volumes_pg_num }}",
    "osd_pool_vms_pg_num": "{{ osd_pool_vms_pg_num }}",
    "osd_pool_shared_pg_num": "{{ osd_pool_shared_pg_num }}"{%- if 0 < osd_pool_caas_pg_num %},
    "osd_pool_caas_pg_num": "{{ osd_pool_caas_pg_num }}"
    {%- endif %}
}
"""


JSON_CEPH_HOSTS = """
{
    "ceph-mon": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mon_hosts": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph_mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd_hosts": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "osds": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mgrs": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mgr": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ]
}
"""
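
# Example rendered result (sketch) for a single host "infra1" carrying all
# three roles: every list above becomes ["infra1"]. The rendered text is
# used twice below, once through _add_host_group() to register the
# "ceph-mon"/"ceph-osd"/"ceph-mgr" groups and once through
# _add_global_parameters() to expose the same lists as global variables.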


# "storage_backend": ceph
# Replaces variables in /opt/openstack-ansible/playbooks/inventory/group_vars/glance_all.yml
JSON_GLANCE_CEPH_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "ceph_conf": "/etc/ceph/ceph.conf"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_GLANCE_LVM_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "file"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


# ceph-ansible variables must be set at host_vars level
# ceph-ansible sample variables in group_vars
# group_vars - all.yml.sample
JSON_CEPH_ANSIBLE_ALL_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "mon_group_name": "mons",
        "osd_group_name": "osds",
        "mgr_group_name": "mgrs",
        "ceph_stable_release": "luminous",
        "generate_fsid": "true",
        "journal_size": "10240",
        "osd_objectstore": "bluestore"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


# pylint: disable=line-too-long
# group_vars - mons.yml.sample
JSON_CEPH_ANSIBLE_MONS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "monitor_secret": "{{ '{{ monitor_keyring.stdout }}' }}",
        "openstack_config": true,
        "cephkeys_access_group": "cephkeys",
        "openstack_pools": [
            {
                "name": "{{ platform_pool }}",
                "pg_num": "{{ osd_pool_shared_pg_num }}",
                "rule_name": ""
            }{% if is_openstack_deployment %},
            {
                "name": "{{ glance_pool }}",
                "pg_num": "{{ osd_pool_images_pg_num }}",
                "rule_name": ""
            },
            {
                "name": "{{ cinder_pool }}",
                "pg_num": "{{ osd_pool_volumes_pg_num }}",
                "rule_name": ""
            },
            {
                "name": "{{ nova_pool }}",
                "pg_num": "{{ osd_pool_vms_pg_num }}",
                "rule_name": ""
            }
            {% endif %}
            {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
            {
                "name": "caas",
                "pg_num": "{{ osd_pool_caas_pg_num }}",
                "rule_name": ""
            }
            {%- endif %}
        ],
        "openstack_keys": [
            {
                "acls": [],
                "key": "{{ ceph_keys['client.shared'] }}",
                "mode": "0600",
                "mon_cap": "allow r",
                "name": "client.shared",
                "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ platform_pool }}"
            }{% if is_openstack_deployment %},
            {
                "acls": [],
                "key": "{{ ceph_keys['client.glance'] }}",
                "mode": "0600",
                "mon_cap": "allow r",
                "name": "client.glance",
                "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ glance_pool }}"
            },
            {
                "acls": [],
                "key": "{{ ceph_keys['client.cinder'] }}",
                "mode": "0600",
                "mon_cap": "allow r, allow command \\\\\\\\\\\\\\"osd blacklist\\\\\\\\\\\\\\"",
                "name": "client.cinder",
                "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ cinder_pool }}, allow rwx pool={{ nova_pool }}, allow rx pool={{ glance_pool }}"
            }
            {% endif %}
            {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
            {
                "acls": [],
                "key": "{{ ceph_keys['client.caas'] }}",
                "mode": "0600",
                "mon_cap": "allow r",
                "name": "client.caas",
                "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=caas"
            }
            {%- endif %}
        ]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


# pylint: enable=line-too-long
# group_vars - osds.yml.sample
JSON_CEPH_ANSIBLE_OSDS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "raw_journal_devices": [],
        "journal_collocation": true,
        "raw_multi_journal": false,
        "dmcrypt_journal_collocation": false,
        "dmcrypt_dedicated_journal": false,
        "osd_scenario": "collocated",
        "dedicated_devices": []
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_SINGLE_CONTROLLER_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "single_controller_host": true
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


class Host(object):
    def __init__(self):
        self.name = None
        self.is_lvm = None
        self.is_osd = None
        self.is_mon = None
        self.is_mgr = None
        self.os_disk = None
        self.is_rbd_ceph = None
        self.ceph_osd_disks = []
        self.cinder_disks = []
        self.is_controller = False
        self.is_compute = False
        self.is_storage = False
        self.instance_physical_volumes = []
        self.cinder_physical_volumes = []
        self.instance_disks = []
        self.instance_lv_percentage = ""
        self.osd_disks_ids = []
        self.vg_percentage = NOT_INSTANCE_NODE_VG_PERCENTAGE
        self.bare_lvm_disks = None
        self.is_bare_lvm = None
        self.bare_lvm_physical_volumes = None
        self.mount_options = None
        self.mount_dir = None
        self.bare_lvm_lv_name = None


class storageinventory(cmansibleinventoryconfig.CMAnsibleInventoryConfigPlugin):

    def __init__(self, confman, inventory, ownhost):
        super(storageinventory, self).__init__(confman, inventory, ownhost)
        self.hosts = []
        self.storage_hosts = []
        self.compute_hosts = []
        self.controller_hosts = []
        self._mon_hosts = []
        self._osd_hosts = []
        self._mgr_hosts = []
        self.single_node_config = False
        self._networking_config_handler = self.confman.get_networking_config_handler()
        self._hosts_config_handler = self.confman.get_hosts_config_handler()
        self._storage_config_handler = self.confman.get_storage_config_handler()
        self._openstack_config_handler = self.confman.get_openstack_config_handler()
        self._sp_config_handler = self.confman.get_storage_profiles_config_handler()
        self._caas_config_handler = self.confman.get_caas_config_handler()
        self._ceph_caas_pg_proportion = 0.0
        self._ceph_openstack_pg_proportion = 0.0
        self._ceph_keys_dict = None
        self._cinder_pool_name = 'volumes'
        self._glance_pool_name = 'images'
        self._nova_pool_name = 'vms'
        self._platform_pool_name = 'shared'
        self._storage_profile_attribute_properties = {
            'lvm_cinder_storage_partitions': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_cinder_storage_partitions
            },
            'mount_options': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_options
            },
            'mount_dir': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_dir
            },
            'lv_name': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_lv_name
            },
            'nr_of_ceph_osd_disks': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_nr_of_ceph_osd_disks
            },
            'lvm_instance_storage_partitions': {
                'backends': ['lvm', 'bare_lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_storage_partitions
            },
            'lvm_instance_cow_lv_storage_percentage': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_cow_lv_storage_percentage
            },
            'openstack_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_openstack_pg_proportion
            },
            'caas_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_caas_pg_proportion
            }
        }

    def _is_host_management(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_management_service_profile(), host)

    def _is_host_controller(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_controller_service_profile(), host)

    def _is_profile_in_hosts_profiles(self, profile, host):
        node_service_profiles = self._hosts_config_handler.get_service_profiles(host)
        return profile in node_service_profiles

    def _is_host_compute(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_compute_service_profile(), host)

    def _is_host_caas_master(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_caasmaster_service_profile(), host)

    def _is_host_storage(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_storage_service_profile(), host)

    def _is_controller_has_compute(self):
        if set.intersection(set(self.compute_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_controller_node_config(self):
        if set.intersection(set(self.storage_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_3controllers_config(self):
        if (self._is_collocated_controller_node_config() and
                (len(self.controller_hosts) == 3) and (len(self.hosts) == 3)):
            return True
        return False

    def _is_dedicated_storage_config(self):
        collocated_config = set.intersection(set(self.storage_hosts), set(self.controller_hosts))
        if collocated_config and (collocated_config == set(self.controller_hosts)):
            return False
        elif self.storage_hosts:
            return True
        return False

    def handle_bootstrapping(self):
        self.handle('bootstrapping')

    def handle_provisioning(self):
        self.handle('provisioning')

    def handle_postconfig(self):
        self.handle('postconfig')

    def handle_setup(self):
        pass

    def _template_and_add_vars_to_hosts(self, template, **variables):
        try:
            text = Environment().from_string(template).render(variables)
            self._add_vars_for_hosts(text)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_vars_for_hosts(self, inventory_text):
        inventory = json.loads(inventory_text)
        for host in inventory.keys():
            for var, value in inventory[host].iteritems():
                self.add_host_var(host, var, value)
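
    # The rendered inventory text parsed above is expected to look like
    # (illustrative sketch):
    #     {"controller-1": {"cinder_service_hostname": "controller-1", ...},
    #      "compute-1": {...}}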

    @staticmethod
    def _read_cinder_ceph_client_uuid():
        if os.path.isfile(USER_SECRETS):
            d = dict(line.split(':', 1) for line in open(USER_SECRETS))
            cinder_ceph_client_uuid = d['cinder_ceph_client_uuid'].strip()
            return cinder_ceph_client_uuid
        else:
            raise cmerror.CMError("The file {} does not exist.".format(USER_SECRETS))
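
    # user_secrets.yml is parsed above as plain "key: value" lines, so the
    # relevant entry is expected to look like (hypothetical value):
    #     cinder_ceph_client_uuid: 1b690438-a9c8-4a60-9e8c-8b2b3280d880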

    def _add_cinder_backends(self):
        self._template_and_add_vars_to_hosts(
            JSON_CINDER_BACKENDS_HOST_VAR,
            hosts=self.controller_hosts,
            installation_controller_ip=self._installation_host_ip,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            openstack_storage=self._openstack_config_handler.get_storage_backend(),
            cinder_pool_name=self._cinder_pool_name)

    def _add_external_ceph_cinder_backends(self):
        handler = self._storage_config_handler
        self._template_and_add_vars_to_hosts(
            JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR,
            hosts=self.hosts,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            ext_ceph_user=handler.get_ext_ceph_ceph_user(),
            ext_ceph_user_key=handler.get_ext_ceph_ceph_user_key(),
            ext_ceph_fsid=handler.get_ext_ceph_fsid(),
            ext_ceph_mon_hosts=", ".join(handler.get_ext_ceph_mon_hosts()),
            nova_pool_name=self._nova_pool_name,
            glance_pool_name=self._glance_pool_name,
            cinder_pool_name=self._cinder_pool_name,
            platform_pool_name=self._platform_pool_name)

    def _add_storage_nodes_configs(self):
        rbdhosts = []
        for host in self.hosts:
            if host.is_rbd_ceph:
                rbdhosts.append(host)
        self._template_and_add_vars_to_hosts(JSON_STORAGE_HOST_VAR, hosts=rbdhosts)

    def _add_hdd_storage_configs(self):
        self._template_and_add_vars_to_hosts(
            JSON_STORAGE_HOST_DISK_CONFIGURATION,
            hosts=self.hosts,
            rootdisk_device=DEFAULT_ROOTDISK_DEVICE)

    def _add_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_bare_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_BARE_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_instance_devices(self):
        self._template_and_add_vars_to_hosts(JSON_DEVICE_HOST_VAR, hosts=self.compute_hosts)

    def _add_ceph_hosts(self):
        self._add_host_group(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

        self._add_global_parameters(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

    def _add_glance(self):
        if self.is_ceph_backend:
            self._template_and_add_vars_to_hosts(
                JSON_GLANCE_CEPH_ALL_GROUP_VARS,
                hosts=self.hosts,
                glance_pool_name=self._glance_pool_name)
        elif self.is_lvm_backend:
            self._template_and_add_vars_to_hosts(JSON_GLANCE_LVM_ALL_GROUP_VARS, hosts=self.hosts)

    def _add_ceph_ansible_all_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_ALL_HOST_VARS, hosts=self.hosts)

    def _add_ceph_ansible_mons_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(
            JSON_CEPH_ANSIBLE_MONS_HOST_VARS,
            hosts=self.hosts,
            **self._get_ceph_vars())

    @property
    def _ceph_keys(self):
        if not self._ceph_keys_dict:
            try:
                self._ceph_keys_dict = {
                    'client.shared': subprocess.check_output(
                        ["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.glance': subprocess.check_output(
                        ["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.cinder': subprocess.check_output(
                        ["ceph-authtool", "--gen-print-key"]).strip(),
                    'client.caas': subprocess.check_output(
                        ["ceph-authtool", "--gen-print-key"]).strip()
                }
            except Exception as exp:
                raise cmerror.CMError(str(exp))

        return self._ceph_keys_dict

    def _get_ceph_vars(self):
        return {
            'osd_pool_images_pg_num': self._calculated_images_pg_num,
            'osd_pool_volumes_pg_num': self._calculated_volumes_pg_num,
            'osd_pool_vms_pg_num': self._calculated_vms_pg_num,
            'osd_pool_shared_pg_num': self._calculated_shared_pg_num,
            'osd_pool_caas_pg_num': self._calculated_caas_pg_num,
            'is_openstack_deployment': self._is_openstack_deployment,
            'is_caas_deployment': self._is_caas_deployment,
            'is_hybrid_deployment': self._is_hybrid_deployment,
            'nova_pool': self._nova_pool_name,
            'glance_pool': self._glance_pool_name,
            'cinder_pool': self._cinder_pool_name,
            'platform_pool': self._platform_pool_name,
            'ceph_keys': self._ceph_keys
        }

    def _add_ceph_ansible_osds_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_OSDS_HOST_VARS, hosts=self.hosts)

    def _add_nova(self):  # method name assumed; the def line is elided in this excerpt
        if self.is_external_ceph_backend:
            nova_ceph_client = self._storage_config_handler.get_ext_ceph_ceph_user()
        else:
            nova_ceph_client = 'cinder'

        self._template_and_add_vars_to_hosts(
            JSON_NOVA_RBD_HOST_VAR, hosts=self.compute_hosts,
            nova_pool_name=self._nova_pool_name,
            nova_ceph_client=nova_ceph_client)

    def _add_single_controller_host_var(self):
        self._template_and_add_vars_to_hosts(
            JSON_SINGLE_CONTROLLER_VAR, hosts=self.controller_hosts)

    def _add_global_parameters(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_global_var(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_host_group(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_host_group(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    @property
    def cluster_network_cidrs(self):
        cidrs = []
        network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        for domain in self._networking_config_handler.get_network_domains(network):
            cidrs.append(self._networking_config_handler.get_network_cidr(network, domain))
        return ','.join(cidrs)

    @property
    def public_network_cidrs(self):
        cidrs = set()
        cluster_network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        public_network = self._networking_config_handler.get_infra_internal_network_name()
        for domain in self._networking_config_handler.get_network_domains(cluster_network):
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        for host in self._mon_hosts:
            domain = self._hosts_config_handler.get_host_network_domain(host.name)
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        return ','.join(cidrs)
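
    # Example (sketch): with two network domains these properties return
    # comma-joined CIDR strings such as "192.168.1.0/24,192.168.2.0/24",
    # which JSON_NETWORK feeds to ceph as "public_network"/"cluster_network".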

    def _add_networks(self):
        self._add_global_parameters(
            Environment().from_string(JSON_NETWORK).render(
                public_networks=self.public_network_cidrs,
                cluster_networks=self.cluster_network_cidrs))

    def _add_monitor_address(self):
        infra_storage_network = self._networking_config_handler.get_infra_internal_network_name()
        for host in self._mon_hosts:
            monitor_address = \
                self._networking_config_handler.get_host_ip(host.name, infra_storage_network)
            self.add_host_var(host.name, "monitor_address", monitor_address)

    def _add_override_settings(self):
        ceph_osd_pool_size = self._storage_config_handler.get_ceph_osd_pool_size()

        if self._is_collocated_3controllers_config():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_3CONTROLLERS).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())

        elif self._is_controller_has_compute():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_CACHE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())
        else:
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

    def _calculate_pg_num(self, pool_data_percentage):
        pgnum = PGNum(self._total_number_of_osds,
                      pool_data_percentage,
                      self._number_of_replicas)
        return pgnum.calculate()

    @property
    def _calculated_default_pg_num(self):
        return self._calculate_pg_num(self._pool_data_percentage)

    @property
    def _calculated_volumes_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_images_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_IMAGES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_vms_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VMS_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_shared_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_SHARED_PG_NUM_PERCENTAGE)

    @property
    def _calculated_caas_pg_num(self):
        if self._ceph_caas_pg_proportion > 0:
            return self._calculate_pg_num(
                (OSD_POOL_CAAS_PG_NUM_PERCENTAGE - OSD_POOL_SHARED_PG_NUM_PERCENTAGE) *
                self._ceph_caas_pg_proportion)
        return 0
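
    # Worked example (illustrative): in a hybrid deployment with pg
    # proportions 0.6 (OpenStack) and 0.4 (CaaS), the volumes pool gets a
    # data share of 0.69 * 0.6 = 0.414, while the caas pool gets
    # (1.0 - 0.02) * 0.4 = 0.392 of the cluster's target PGs.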

    def _add_osd_pool_pg_nums(self):
        self._add_global_parameters(
            Environment().from_string(JSON_OSD_POOL_PGNUMS).render(**self._get_ceph_vars()))

    @property
    def _installation_host(self):
        return self._hosts_config_handler.get_installation_host()

    @property
    def _infra_internal_network_name(self):
        return self._networking_config_handler.get_infra_internal_network_name()

    @property
    def _installation_host_ip(self):
        return self._networking_config_handler.get_host_ip(
            self._installation_host, self._infra_internal_network_name)

    @property
    def is_ceph_backend(self):
        return self._storage_config_handler.is_ceph_enabled()

    @property
    def is_external_ceph_backend(self):
        return (self._storage_config_handler.is_external_ceph_enabled() and
                self._ceph_is_openstack_storage_backend)

    def _set_external_ceph_pool_names(self):
        if self.is_external_ceph_backend:
            h = self._storage_config_handler
            self._nova_pool_name = h.get_ext_ceph_nova_pool()
            self._cinder_pool_name = h.get_ext_ceph_cinder_pool()
            self._glance_pool_name = h.get_ext_ceph_glance_pool()
            self._platform_pool_name = h.get_ext_ceph_platform_pool()

    @property
    def _lvm_is_openstack_storage_backend(self):
        return self._openstack_config_handler.get_storage_backend() == 'lvm'

    @property
    def _ceph_is_openstack_storage_backend(self):
        return self._openstack_config_handler.get_storage_backend() == 'ceph'

    @property
    def is_lvm_backend(self):
        return (self._storage_config_handler.is_lvm_enabled() and
                self._lvm_is_openstack_storage_backend)

    @property
    def instance_default_backend(self):
        return self._openstack_config_handler.get_instance_default_backend()

    @property
    def _hosts_with_ceph_storage_profile(self):
        # return filter(lambda host: host.is_rbd, self.hosts)
        return [host for host in self.hosts if host.is_rbd_ceph]

    @property
    def _is_openstack_deployment(self):
        return self._caas_config_handler.is_openstack_deployment()

    @property
    def _is_caas_deployment(self):
        return self._caas_config_handler.is_caas_deployment()

    @property
    def _is_hybrid_deployment(self):
        return self._caas_config_handler.is_hybrid_deployment()

    def handle(self, phase):
        self._init_jinja_environment()
        self.add_global_var("external_ceph_configured", self.is_external_ceph_backend)
        self.add_global_var("ceph_configured", self.is_ceph_backend)
        self.add_global_var("lvm_configured", self.is_lvm_backend)
        if phase == 'bootstrapping':
            self._add_hdd_storage_configs()
        else:
            self._add_hdd_storage_configs()
            if self.is_external_ceph_backend:
                self._set_external_ceph_pool_names()
                self._add_external_ceph_cinder_backends()
            else:
                if self._is_openstack_deployment:
                    self._add_cinder_backends()
                    self._add_glance()

            ceph_hosts = self._hosts_with_ceph_storage_profile
            if ceph_hosts:
                self._set_ceph_pg_proportions(ceph_hosts)
                self._add_ceph_ansible_all_sample_host_vars()
                self._add_ceph_ansible_mons_sample_host_vars()
                self._add_ceph_ansible_osds_sample_host_vars()
                self._add_ceph_hosts()
                self._add_storage_nodes_configs()
                self._add_monitor_address()
                self._add_override_settings()
                self._add_osd_pool_pg_nums()
                self._add_networks()
                self.add_global_var("cinder_ceph_client_uuid", self._read_cinder_ceph_client_uuid())
            if self.is_lvm_backend:
                self._add_lvm_storage_configs()
            self._add_bare_lvm_storage_configs()

            self.add_global_var("instance_default_backend", self.instance_default_backend)
            self.add_global_var("storage_single_node_config", self.single_node_config)
            self.add_global_var("one_controller_node_config", self._is_one_controller_node_config)
            if self._is_one_controller_node_config:
                self._add_single_controller_host_var()
            self.add_global_var("collocated_controller_node_config",
                                self._is_collocated_controller_node_config())
            self.add_global_var("dedicated_storage_node_config",
                                self._is_dedicated_storage_config())
            self.add_global_var("storage_one_controller_multi_nodes_config",
                                self._is_one_controller_multi_nodes_config)
            if self.instance_default_backend == 'rbd':
                self._add_nova()
            elif self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                self._add_instance_devices()

    def _set_ceph_pg_proportions(self, ceph_hosts):
        # FIXME: the pg proportion values are taken from the first ceph host's
        # storage profile only
        hostname = ceph_hosts[0].name
        if self._is_hybrid_deployment:
            self._ceph_openstack_pg_proportion = self._get_ceph_openstack_pg_proportion(hostname)
            self._ceph_caas_pg_proportion = self._get_ceph_caas_pg_proportion(hostname)
        elif self._is_openstack_deployment:
            self._ceph_openstack_pg_proportion = 1.0
            self._ceph_caas_pg_proportion = 0.0
        elif self._is_caas_deployment:
            self._ceph_openstack_pg_proportion = 0.0
            self._ceph_caas_pg_proportion = 1.0

    def _init_host_data(self):
        hosts = self._hosts_config_handler.get_enabled_hosts()
        self.single_node_config = len(hosts) == 1
        for name in hosts:
            host = self._initialize_host_object(name)
            self.hosts.append(host)
            if host.is_osd:
                self._osd_hosts.append(host)
            if host.is_mon:
                self._mon_hosts.append(host)
            if host.is_mgr:
                self._mgr_hosts.append(host)

        for host in self.hosts:
            if host.is_compute:
                self.compute_hosts.append(host)
            if host.is_controller:
                self.controller_hosts.append(host)
            if host.is_storage:
                self.storage_hosts.append(host)

    @property
    def _number_of_osd_hosts(self):
        return len(self._osd_hosts)

    @property
    def _is_one_controller_multi_nodes_config(self):
        if len(self.controller_hosts) == 1 and not self.single_node_config:
            return True
        return False

    @property
    def _is_one_controller_node_config(self):
        if len(self.controller_hosts) == 1:
            return True
        return False

    @property
    def _number_of_osds_per_host(self):
        first_osd_host = self._osd_hosts[0].name
        return self._get_nr_of_ceph_osd_disks(first_osd_host)

    @property
    def _total_number_of_osds(self):
        return self._number_of_osds_per_host * self._number_of_osd_hosts

    @property
    def _number_of_pools(self):
        """TODO: Get this dynamically."""
        return NUMBER_OF_POOLS

    @property
    def _pool_data_percentage(self):
        return float(1.0 / self._number_of_pools)

    @property
    def _number_of_replicas(self):
        num = self._storage_config_handler.get_ceph_osd_pool_size()
        return 2 if num == 0 else num

    def _init_jinja_environment(self):
        self._init_host_data()

    def _is_backend_configured(self, backend, host_name):
        try:
            if self._get_storage_profile_for_backend(host_name, backend):
                return True
            return False
        except configerror.ConfigError:
            return False

    def _get_storage_profile_for_backend(self, host_name, *backends):
        storage_profiles = self._hosts_config_handler.get_storage_profiles(host_name)
        sp_handler = self._sp_config_handler
        for storage_profile in storage_profiles:
            if sp_handler.get_profile_backend(storage_profile) in backends:
                return storage_profile
        return None

    def _get_nr_of_ceph_osd_disks(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'nr_of_ceph_osd_disks')

    def _get_storage_profile_attribute(self, host_name, attribute):
        attribute_properties = self._storage_profile_attribute_properties[attribute]
        storage_profile = self._get_storage_profile_for_backend(host_name,
                                                                *attribute_properties['backends'])
        if storage_profile:
            return attribute_properties['getter'](storage_profile)
        raise cmerror.CMError(str("Failed to get %s" % attribute))

    def _get_lvm_instance_storage_partitions(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_storage_partitions')
        except configerror.ConfigError:
            pass

        if self.instance_default_backend not in ALL_DEFAULT_INSTANCE_BACKENDS:
            raise cmerror.CMError(
                str("Unknown instance_default_backend %s "
                    "not supported" % self.instance_default_backend))
        return []

    def _get_lvm_cinder_storage_partitions(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lvm_cinder_storage_partitions')

    def _get_bare_lvm_mount_options(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_options')

    def _get_bare_lvm_mount_dir(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_dir')

    def _get_bare_lvm_lv_name(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lv_name')

    def _get_instance_lv_percentage(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_cow_lv_storage_percentage')
        except configerror.ConfigError:
            return DEFAULT_INSTANCE_LV_PERCENTAGE
        raise cmerror.CMError(str("Failed to find lvm from storage_profiles"))

    def _is_osd_host(self, name):
        try:
            return bool(name in self._hosts_config_handler.get_service_profile_hosts('storage'))
        except configerror.ConfigError:
            return False

    def _is_rbd_ceph_configured(self, host_name):
        return self._is_backend_configured('ceph', host_name)

    def _is_lvm_configured(self, host_name):
        return self._is_backend_configured('lvm', host_name)

    def _is_bare_lvm_configured(self, host_name):
        return self._is_backend_configured('bare_lvm', host_name)

    def _get_hw_type(self, name):
        hwmgmt_addr = self._hosts_config_handler.get_hwmgmt_ip(name)
        hwmgmt_user = self._hosts_config_handler.get_hwmgmt_user(name)
        hwmgmt_pass = self._hosts_config_handler.get_hwmgmt_password(name)
        hwmgmt_priv_level = self._hosts_config_handler.get_hwmgmt_priv_level(name)
        return hw.get_hw_type(hwmgmt_addr, hwmgmt_user, hwmgmt_pass, hwmgmt_priv_level)

    @staticmethod
    def _get_os_disk(hw_type):
        return hw.get_os_hd(hw_type)

    def _get_osd_disks_for_embedded_deployment(self, host_name):
        return self._hosts_config_handler.get_ceph_osd_disks(host_name)

    @staticmethod
    def _get_osd_disks(hw_type):
        return hw.get_hd_with_usage(hw_type, "osd")

    def _by_path_disks(self, hw_type, nr_of_disks):
        return self._get_osd_disks(hw_type)[0:nr_of_disks]

    @staticmethod
    def _is_by_path_disks(disk_list):
        return [disk for disk in disk_list if "by-path" in disk]

    def _get_physical_volumes(self, disk_list):
        partition_nr = "1"  # assumed partition number; the line is elided in this excerpt
        if self._is_by_path_disks(disk_list):
            return [disk + "-part" + partition_nr for disk in disk_list]
        return [disk + partition_nr for disk in disk_list]
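
    # Example (sketch): ["/dev/disk/by-path/pci-0000:00:11.4-ata-1"] maps to
    # ["/dev/disk/by-path/pci-0000:00:11.4-ata-1-part1"], while plain device
    # names like ["/dev/sdb"] map to ["/dev/sdb1"].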

    def _initialize_host_object(self, name):
        host = Host()
        host.name = name
        host.is_mgr = self._is_host_management(host.name)
        host.is_controller = self._is_host_controller(host.name)
        host.is_compute = self._is_host_compute(host.name)
        host.is_storage = self._is_host_storage(host.name)
        host.is_rbd_ceph = self._is_rbd_ceph_configured(host.name)
        host.is_lvm = self._is_lvm_configured(host.name)
        host.is_bare_lvm = self._is_bare_lvm_configured(host.name)
        host.is_osd = self._is_osd_host(host.name)
        host.is_mon = host.is_mgr
        hw_type = self._get_hw_type(name)
        host.os_disk = self._get_os_disk(hw_type)
        if host.is_bare_lvm:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.bare_lvm_disks = self._by_path_disks(hw_type, len(partitions))
            host.bare_lvm_physical_volumes = self._get_physical_volumes(host.bare_lvm_disks)
            host.mount_options = self._get_bare_lvm_mount_options(host.name)
            host.mount_dir = self._get_bare_lvm_mount_dir(host.name)
            host.bare_lvm_lv_name = self._get_bare_lvm_lv_name(host.name)

        if host.is_compute and self.instance_default_backend != 'rbd':
            host.vg_percentage = INSTANCE_NODE_VG_PERCENTAGE

        if self.is_lvm_backend and host.is_controller:
            nr_of_cinder_disks = int(len(self._get_lvm_cinder_storage_partitions(host.name)))
            nr_of_nova_disks = int(len(self._get_lvm_instance_storage_partitions(host.name)))
            nr_of_all_disks = nr_of_cinder_disks + nr_of_nova_disks
            if nr_of_nova_disks > 0:
                host.cinder_disks = \
                    self._by_path_disks(hw_type, nr_of_all_disks)[-nr_of_cinder_disks:]
            else:
                host.cinder_disks = self._by_path_disks(hw_type, nr_of_cinder_disks)
            host.cinder_physical_volumes = self._get_physical_volumes(host.cinder_disks)

        if host.is_rbd_ceph:
            nr_of_osd_disks = self._get_nr_of_ceph_osd_disks(host.name)
            if self._caas_config_handler.is_vnf_embedded_deployment():
                host.ceph_osd_disks = \
                    self._get_osd_disks_for_embedded_deployment(host.name)[0:nr_of_osd_disks]
            else:
                host.ceph_osd_disks = self._get_osd_disks(hw_type)[0:nr_of_osd_disks]
            host.osd_disks_ids = range(1, nr_of_osd_disks + 1)

        if host.is_lvm and host.is_compute:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.instance_disks = self._by_path_disks(hw_type, len(partitions))
            host.instance_physical_volumes = self._get_physical_volumes(host.instance_disks)
            host.instance_lv_percentage = self._get_instance_lv_percentage(host.name)
        return host