# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,too-many-instance-attributes,too-many-lines

import json
import math
import os

from jinja2 import Environment
from cmframework.apis import cmansibleinventoryconfig
from cmframework.apis import cmerror
from cmdatahandlers.api import configerror
from serviceprofiles import profiles
import hw_detector.hw_detect_lib as hw

NEAREST_POWER_OF_2_PERCENTAGE = 0.25

TARGET_PGS_PER_OSD_NO_INCREASE_EXPECTED = 100
TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED = 200
TARGET_PGS_PER_OSD_TWO_TO_THREE_TIMES_SIZE_INCREASE_EXPECTED = 300
# Please visit ceph.com/pgcalc for details on previous values


class PGNum(object):
    """Calculates the pg_num for the given attributes."""

    def __init__(self, number_of_pool_osds, pool_data_percentage, number_of_replicas):
        self._number_of_pool_osds = number_of_pool_osds
        self._pool_data_percentage = pool_data_percentage
        self._number_of_replicas = number_of_replicas

    @staticmethod
    def _round_up_to_closest_power_of_2(num):
        """Smallest power of 2 greater than or equal to num."""
        return 2**(num-1).bit_length() if num > 0 else 1

    @staticmethod
    def _round_down_to_closest_power_of_2(num):
        """Largest power of 2 less than or equal to num."""
        return 2**(num.bit_length()-1) if num > 0 else 1

    @staticmethod
    def _check_percentage_of_values(diff_to_lower, org_pgnum):
        """If the nearest power of 2 is more than 25% below the original value,
        the next higher power of 2 is used. Please visit ceph.com/pgcalc.
        """
        return float(float(diff_to_lower) / float(org_pgnum)) > NEAREST_POWER_OF_2_PERCENTAGE

    def _rounded_pgnum_to_the_nearest_power_of_2(self, pgnum):
        higher_power = self._round_up_to_closest_power_of_2(pgnum)
        lower_power = self._round_down_to_closest_power_of_2(pgnum)
        diff_to_lower = pgnum - lower_power
        if pgnum != 0 and self._check_percentage_of_values(diff_to_lower, pgnum):
            return higher_power
        return lower_power
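
    # Illustrative examples of the rounding rule (numbers assumed, not taken from
    # any deployment): for a raw pg_num of 300 the neighbouring powers of 2 are
    # 256 and 512; the gap to 256 is 44 and 44/300 is about 0.15, below the 25%
    # threshold, so 256 is used. For 24 the gap to 16 is 8 and 8/24 is about 0.33,
    # above the threshold, so the value is rounded up to 32.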

    def _calculate_pg_num_formula(self, number_of_pool_osds, pool_percentage):
        return TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED \
            * number_of_pool_osds * float(pool_percentage) / self._number_of_replicas

    def _select_pgnum_formula_result(self, number_of_pool_osds, pool_percentage):
        pgnum = self._calculate_pg_num_formula(number_of_pool_osds, pool_percentage)
        return int(math.ceil(max(pgnum, MINIMUM_PG_NUM)))

    def calculate(self):
        """The formula of the calculation can be found at ceph.com/pgcalc:

        pgnum = (target_pgs x number_of_osds_in_pool x pool_percentage) / number_of_replicas

        return: pgnum rounded to the nearest power of 2
        """
        pgnum = self._select_pgnum_formula_result(
            self._number_of_pool_osds, self._pool_data_percentage)
        return self._rounded_pgnum_to_the_nearest_power_of_2(pgnum)
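
    # A minimal worked example (figures assumed): with 9 OSDs backing the pool,
    # 3 replicas and a 20% data share, the formula gives 200 * 9 * 0.20 / 3 = 120,
    # which _rounded_pgnum_to_the_nearest_power_of_2 then turns into 128.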


SUPPORTED_INSTANCE_BACKENDS = ['default', 'cow', 'lvm']
ALL_DEFAULT_INSTANCE_BACKENDS = SUPPORTED_INSTANCE_BACKENDS + ['rbd']

DEFAULT_INSTANCE_LV_PERCENTAGE = "100"

USER_SECRETS = "/etc/openstack_deploy/user_secrets.yml"

# Ceph PG share percentages for OpenStack pools
OSD_POOL_IMAGES_PG_NUM_PERCENTAGE = 0.09
OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE = 0.69
OSD_POOL_VMS_PG_NUM_PERCENTAGE = 0.20
OSD_POOL_SHARED_PG_NUM_PERCENTAGE = 0.02
# Ceph PG share percentages for CaaS pools
OSD_POOL_CAAS_PG_NUM_PERCENTAGE = 1.0
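# Note: the four OpenStack shares above sum to 1.0 (0.09 + 0.69 + 0.20 + 0.02),
# i.e. the images, volumes, vms and shared pools together consume the whole
# OpenStack slice of the placement-group budget.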

DEFAULT_ROOTDISK_DEVICE = "/dev/sda"
# Root disk partition 2 (system volume group) VG percentages
INSTANCE_NODE_VG_PERCENTAGE = 0.47
NOT_INSTANCE_NODE_VG_PERCENTAGE = 1
"""
/dev/sda1 fixed partition size: 50GiB fixed size = 10% of the total disk size
/dev/sda2 system VG partition size: 47% of the remaining disk size = 42% of the total disk size
/dev/sda3 instance partition size: 53% of the remaining disk size = 47% of the total disk size
"""
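# Worked out for an assumed 500GiB root disk: /dev/sda1 is the fixed 50GiB (10%),
# leaving 450GiB; /dev/sda2 gets 47% of that remainder (about 211GiB, roughly 42%
# of the whole disk) and /dev/sda3 the remaining 53% (about 239GiB, roughly 47%),
# which is where INSTANCE_NODE_VG_PERCENTAGE = 0.47 comes from.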

JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR = """
{% for host in hosts %}
"ext_ceph_user": "{{ ext_ceph_user }}",
"ext_ceph_user_key": "{{ ext_ceph_user_key }}",
"cephkeys_access_group": "cephkeys",
{% for host in hosts %}
{% if not loop.last %},{% endif %}
"ext_ceph_fsid": "{{ ext_ceph_fsid }}",
"ext_ceph_mon_hosts": "{{ ext_ceph_mon_hosts }}",
"cinder_service_hostname": "{{ host.name }}",
"volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
"rbd_pool": "{{ cinder_pool_name }}",
"rbd_ceph_conf": "/etc/ceph/ceph.conf",
"ceph_conf": "/etc/ceph/ceph.conf",
"rbd_flatten_volume_from_snapshot": "false",
"rbd_max_clone_depth": "5",
"rbd_store_chunk_size": "4",
"rados_connect_timeout": "-1",
"volume_backend_name": "RBD",
"rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
"rbd_user": "{{ ext_ceph_user }}",
"backend_host": "controller",
"rbd_exclusive_cinder_pool": "True"
"ext_openstack_pools": [
"{{ glance_pool_name }}",
"{{ cinder_pool_name }}",
"{{ nova_pool_name }}",
"{{ platform_pool_name }}"
"cinder_ceph_client": "{{ ext_ceph_user }}",
"nova_ceph_client": "{{ ext_ceph_user }}",
"glance_default_store": "rbd",
"glance_additional_stores": ["http", "cinder", "file"],
"glance_rbd_store_pool": "{{ glance_pool_name }}",
"glance_rbd_store_chunk_size": "8",
"glance_ceph_client": "{{ ext_ceph_user }}",
"ceph_conf": "/etc/ceph/ceph.conf"
} {% if not loop.last %},{% endif %}

JSON_CINDER_BACKENDS_HOST_VAR = """
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.is_controller %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
"cinder_service_hostname": "{{ host.name }}",
{% if openstack_storage == 'ceph' %}
"volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
"rbd_pool": "{{ cinder_pool_name }}",
"rbd_ceph_conf": "/etc/ceph/ceph.conf",
"ceph_conf": "/etc/ceph/ceph.conf",
"rbd_flatten_volume_from_snapshot": "false",
"rbd_max_clone_depth": "5",
"rbd_store_chunk_size": "4",
"rados_connect_timeout": "-1",
"volume_backend_name": "volumes_hdd",
"rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
"rbd_user": "cinder",
"backend_host": "controller",
"rbd_exclusive_cinder_pool": "True"
{% if openstack_storage == 'lvm' %}
"iscsi_ip_address": "{{ installation_controller_ip }}",
"volume_backend_name": "LVM_iSCSI",
"volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
"volume_group": "cinder-volumes"

JSON_STORAGE_HOST_VAR = """
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.is_rbd_ceph %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
{% for disk in host.ceph_osd_disks %}
{%if not loop.last %},{% endif %}{% endfor %}]

JSON_STORAGE_HOST_DISK_CONFIGURATION = """
{% for host in hosts %}
{ "os" : "{{ host.os_disk }}",
"osd" : "{{ host.ceph_osd_disks }}",
"osd_disks_ids" : "{{ host.osd_disks_ids }}"
"rootdisk_vg_percentage": "{{ host.vg_percentage }}",
"default_rootdisk_device": "{{ rootdisk_device }}"
} {% if not loop.last %},{% endif %}

JSON_LVM_STORAGE_HOST_VAR = """
{% for host in hosts %}
{% for disk in host.cinder_disks %}
{%if not loop.last %},{% endif %}{% endfor %}],
"cinder_physical_volumes": [
{% for disk in host.cinder_physical_volumes %}
{%if not loop.last %},{% endif %}{% endfor %}]
} {% if not loop.last %},{% endif %}

JSON_BARE_LVM_STORAGE_HOST_VAR = """
{% for host in hosts %}
{% if host.is_bare_lvm %}
{% for disk in host.bare_lvm_disks %}
{%if not loop.last %},{% endif %}{% endfor %}],
"physical_volumes": [
{% for disk in host.bare_lvm_physical_volumes %}
{%if not loop.last %},{% endif %}{% endfor %}],
"mount_options": "{{ host.mount_options }}",
"mount_dir": "{{ host.mount_dir }}",
"name": "{{ host.bare_lvm_lv_name }}"
} {% if not loop.last %},{% endif %}

JSON_DEVICE_HOST_VAR = """
{%- set loopvar = {'first_entry': True} %}
{% for host in hosts %}
{% if host.instance_physical_volumes %}
{%- if not loopvar.first_entry %},{%- endif %}
{%- if loopvar.update({'first_entry': False}) %}{%- endif %}
{% for disk in host.instance_disks %}
{%if not loop.last %},{% endif %}
"instance_physical_volumes": [
{% for disk in host.instance_physical_volumes %}
{%if not loop.last %},{% endif %}
"instance_lv_percentage": "{{ host.instance_lv_percentage }}"

# /etc/ansible/roles/os_nova/templates/nova.conf.j2
JSON_NOVA_RBD_HOST_VAR = """
{% for host in hosts %}
"nova_libvirt_images_rbd_pool": "{{ nova_pool_name }}",
"nova_ceph_client": "{{ nova_ceph_client }}"
} {% if not loop.last %},{% endif %}

# /opt/ceph-ansible/group_vars/osds.yml
"ceph_conf_overrides": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_min_down_reporters": "1",
"mon_osd_adjust_heartbeat_grace": "false",
"auth_client_required": "cephx"
"mgr_modules": "dashboard"
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"

JSON_OVERRIDE_CACHE = """
"ceph_conf_overrides": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_adjust_heartbeat_grace": "false",
"bluestore_cache_size": "1073741824",
"auth_client_required": "cephx"
"mgr_modules": "dashboard"
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"

JSON_OVERRIDE_3CONTROLLERS = """
"ceph_conf_overrides": {
"mon_max_pg_per_osd": "400",
"mon_pg_warn_max_object_skew": "-1",
"osd_pool_default_size": "{{ osd_pool_default_size }}",
"osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
"osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
"osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
"osd_heartbeat_grace": "3",
"osd_heartbeat_interval": "2",
"mon_osd_adjust_heartbeat_grace": "false",
"bluestore_cache_size": "1073741824",
"auth_client_required": "cephx"
"mgr_modules": "dashboard"
"mon_health_preluminous_compat_warning": "false",
"mon_health_preluminous_compat": "true",
"mon_election_timeout": "2",
"mon_lease_renew_interval_factor": "0.4",
"mon_lease_ack_timeout_factor": "1.5",
"mon_timecheck_interval": "60",
"mon_sd_reporter_subtree_level": "device",
"mon_clock_drift_allowed": "0.1"
"osd_mon_heartbeat_interval": "10",
"osd_mon_report_interval_min": "1",
"osd_mon_report_interval_max": "15"

"public_network": "{{ public_networks }}",
"cluster_network": "{{ cluster_networks }}"

"os_tuning_params": [{
"name": "vm.min_free_kbytes",

JSON_OSD_POOL_PGNUMS = """
"osd_pool_images_pg_num": "{{ osd_pool_images_pg_num }}",
"osd_pool_volumes_pg_num": "{{ osd_pool_volumes_pg_num }}",
"osd_pool_vms_pg_num": "{{ osd_pool_vms_pg_num }}",
"osd_pool_shared_pg_num": "{{ osd_pool_shared_pg_num }}"{%- if 0 < osd_pool_caas_pg_num %},
"osd_pool_caas_pg_num": "{{ osd_pool_caas_pg_num }}"

JSON_CEPH_HOSTS = """
"ceph-mon": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-mon_hosts": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph_mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-osd": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-osd_hosts": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"osds": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"mgrs": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
"ceph-mgr": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ]

# "storage_backend": ceph
# Replaces variables in /opt/openstack-ansible/playbooks/inventory/group_vars/glance_all.yml
JSON_GLANCE_CEPH_ALL_GROUP_VARS = """
{% for host in hosts %}
"glance_default_store": "rbd",
"glance_additional_stores": ["http", "cinder", "file"],
"glance_rbd_store_pool": "{{ glance_pool_name }}",
"glance_rbd_store_chunk_size": "8",
"ceph_conf": "/etc/ceph/ceph.conf"
} {% if not loop.last %},{% endif %}

JSON_GLANCE_LVM_ALL_GROUP_VARS = """
{% for host in hosts %}
"glance_default_store": "file"
} {% if not loop.last %},{% endif %}

# ceph-ansible variables must be set at host_vars level
# ceph-ansible sample variables in group_vars
# group_vars - all.yml.sample
JSON_CEPH_ANSIBLE_ALL_HOST_VARS = """
{% for host in hosts %}
"mon_group_name": "mons",
"osd_group_name": "osds",
"mgr_group_name": "mgrs",
"ceph_stable_release": "luminous",
"generate_fsid": "true",
"journal_size": "10240",
"osd_objectstore": "bluestore"
} {% if not loop.last %},{% endif %}

# pylint: disable=line-too-long
# group_vars - mons.yml.sample
JSON_CEPH_ANSIBLE_MONS_HOST_VARS = """
{% for host in hosts %}
"monitor_secret": "{{ '{{ monitor_keyring.stdout }}' }}",
"openstack_config": true,
"cephkeys_access_group": "cephkeys",
"name": "{{ platform_pool }}",
"pg_num": "{{ osd_pool_shared_pg_num }}",
}{% if is_openstack_deployment %},
"name": "{{ glance_pool }}",
"pg_num": "{{ osd_pool_images_pg_num }}",
"name": "{{ cinder_pool }}",
"pg_num": "{{ osd_pool_volumes_pg_num }}",
"name": "{{ nova_pool }}",
"pg_num": "{{ osd_pool_vms_pg_num }}",
{%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
"pg_num": "{{ osd_pool_caas_pg_num }}",
"key": "$(ceph-authtool --gen-print-key)",
"mon_cap": "allow r",
"name": "client.shared",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ platform_pool }}"
}{% if is_openstack_deployment %},
"key": "$(ceph-authtool --gen-print-key)",
"mon_cap": "allow r",
"name": "client.glance",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ glance_pool }}"
"key": "$(ceph-authtool --gen-print-key)",
"mon_cap": "allow r, allow command \\\\\\\\\\\\\\"osd blacklist\\\\\\\\\\\\\\"",
"name": "client.cinder",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ cinder_pool }}, allow rwx pool={{ nova_pool }}, allow rx pool={{ glance_pool }}"
{%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
"key": "$(ceph-authtool --gen-print-key)",
"mon_cap": "allow r",
"name": "client.caas",
"osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=caas"
} {% if not loop.last %},{% endif %}

# pylint: enable=line-too-long
# group_vars - osds.yml.sample
JSON_CEPH_ANSIBLE_OSDS_HOST_VARS = """
{% for host in hosts %}
"raw_journal_devices": [],
"journal_collocation": true,
"raw_multi_journal": false,
"dmcrytpt_journal_collocation": false,
"dmcrypt_dedicated_journal": false,
"osd_scenario": "collocated",
"dedicated_devices": []
} {% if not loop.last %},{% endif %}

JSON_SINGLE_CONTROLLER_VAR = """
{% for host in hosts %}
"single_controller_host": true
} {% if not loop.last %},{% endif %}

        self.is_rbd_ceph = None
        self.ceph_osd_disks = []
        self.cinder_disks = []
        self.is_controller = False
        self.is_compute = False
        self.is_storage = False
        self.instance_physical_volumes = []
        self.cinder_physical_volumes = []
        self.instance_disks = []
        self.instance_lv_percentage = ""
        self.osd_disks_ids = []
        self.vg_percentage = NOT_INSTANCE_NODE_VG_PERCENTAGE
        self.bare_lvm_disks = None
        self.is_bare_lvm = None
        self.bare_lvm_physical_volumes = None
        self.mount_options = None
        self.bare_lvm_lv_name = None


class storageinventory(cmansibleinventoryconfig.CMAnsibleInventoryConfigPlugin):

    def __init__(self, confman, inventory, ownhost):
        super(storageinventory, self).__init__(confman, inventory, ownhost)
        self.storage_hosts = []
        self.compute_hosts = []
        self.controller_hosts = []
        self.single_node_config = False
        self._networking_config_handler = self.confman.get_networking_config_handler()
        self._hosts_config_handler = self.confman.get_hosts_config_handler()
        self._storage_config_handler = self.confman.get_storage_config_handler()
        self._openstack_config_handler = self.confman.get_openstack_config_handler()
        self._sp_config_handler = self.confman.get_storage_profiles_config_handler()
        self._caas_config_handler = self.confman.get_caas_config_handler()
        self._ceph_caas_pg_proportion = 0.0
        self._ceph_openstack_pg_proportion = 0.0
        self._cinder_pool_name = 'volumes'
        self._glance_pool_name = 'images'
        self._nova_pool_name = 'vms'
        self._platform_pool_name = 'shared'
        self._storage_profile_attribute_properties = {
            'lvm_cinder_storage_partitions': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_cinder_storage_partitions
            },
            'mount_options': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_options
            },
            'mount_dir': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_dir
            },
            'lv_name': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_lv_name
            },
            'nr_of_ceph_osd_disks': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_nr_of_ceph_osd_disks
            },
            'lvm_instance_storage_partitions': {
                'backends': ['lvm', 'bare_lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_storage_partitions
            },
            'lvm_instance_cow_lv_storage_percentage': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_cow_lv_storage_percentage
            },
            'openstack_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_openstack_pg_proportion
            },
            'caas_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_caas_pg_proportion
            }
        }

    def _is_host_managment(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_management_service_profile(), host)

    def _is_host_controller(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_controller_service_profile(), host)

    def _is_profile_in_hosts_profiles(self, profile, host):
        node_service_profiles = self._hosts_config_handler.get_service_profiles(host)
        return profile in node_service_profiles

    def _is_host_compute(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_compute_service_profile(), host)

    def _is_host_caas_master(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_caasmaster_service_profile(), host)

    def _is_host_storage(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_storage_service_profile(), host)

    def _is_controller_has_compute(self):
        if set.intersection(set(self.compute_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_controller_node_config(self):
        if set.intersection(set(self.storage_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_3controllers_config(self):
        if (self._is_collocated_controller_node_config() and
                (len(self.controller_hosts) == 3) and (len(self.hosts) == 3)):
            return True
        return False

    def _is_dedicated_storage_config(self):
        collocated_config = set.intersection(set(self.storage_hosts), set(self.controller_hosts))
        if collocated_config and (collocated_config == set(self.controller_hosts)):
            return False
        elif self.storage_hosts:
            return True
        return False

    def handle_bootstrapping(self):
        self.handle('bootstrapping')

    def handle_provisioning(self):
        self.handle('provisioning')

    def handle_postconfig(self):
        self.handle('postconfig')

    def handle_setup(self):
        pass

    def _template_and_add_vars_to_hosts(self, template, **variables):
        try:
            text = Environment().from_string(template).render(variables)
            self._add_vars_for_hosts(text)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_vars_for_hosts(self, inventory_text):
        inventory = json.loads(inventory_text)
        for host in inventory.keys():
            for var, value in inventory[host].iteritems():
                self.add_host_var(host, var, value)
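
    # Each JSON_* template above is rendered by _template_and_add_vars_to_hosts()
    # into a JSON document keyed by host name; the nested loop above then pushes
    # every key/value pair to add_host_var(), so the rendered data ends up as
    # per-host Ansible inventory variables.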

    @staticmethod
    def _read_cinder_ceph_client_uuid():
        if os.path.isfile(USER_SECRETS):
            d = dict(line.split(':', 1) for line in open(USER_SECRETS))
            cinder_ceph_client_uuid = d['cinder_ceph_client_uuid'].strip()
            return cinder_ceph_client_uuid
        else:
            raise cmerror.CMError("The file {} does not exist.".format(USER_SECRETS))

    def _add_cinder_backends(self):
        self._template_and_add_vars_to_hosts(
            JSON_CINDER_BACKENDS_HOST_VAR,
            hosts=self.controller_hosts,
            installation_controller_ip=self._installation_host_ip,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            openstack_storage=self._openstack_config_handler.get_storage_backend(),
            cinder_pool_name=self._cinder_pool_name)

    def _add_external_ceph_cinder_backends(self):
        handler = self._storage_config_handler
        self._template_and_add_vars_to_hosts(
            JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            ext_ceph_user=handler.get_ext_ceph_ceph_user(),
            ext_ceph_user_key=handler.get_ext_ceph_ceph_user_key(),
            ext_ceph_fsid=handler.get_ext_ceph_fsid(),
            ext_ceph_mon_hosts=", ".join(handler.get_ext_ceph_mon_hosts()),
            nova_pool_name=self._nova_pool_name,
            glance_pool_name=self._glance_pool_name,
            cinder_pool_name=self._cinder_pool_name,
            platform_pool_name=self._platform_pool_name)

    def _add_storage_nodes_configs(self):
        rbdhosts = []
        for host in self.hosts:
            if host.is_rbd_ceph:
                rbdhosts.append(host)
        self._template_and_add_vars_to_hosts(JSON_STORAGE_HOST_VAR, hosts=rbdhosts)

    def _add_hdd_storage_configs(self):
        self._template_and_add_vars_to_hosts(
            JSON_STORAGE_HOST_DISK_CONFIGURATION,
            rootdisk_device=DEFAULT_ROOTDISK_DEVICE)

    def _add_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_bare_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_BARE_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_instance_devices(self):
        self._template_and_add_vars_to_hosts(JSON_DEVICE_HOST_VAR, hosts=self.compute_hosts)

    def _add_ceph_hosts(self):
        self._add_host_group(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

        self._add_global_parameters(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

    def _add_glance(self):
        if self.is_ceph_backend:
            self._template_and_add_vars_to_hosts(
                JSON_GLANCE_CEPH_ALL_GROUP_VARS,
                glance_pool_name=self._glance_pool_name)
        elif self.is_lvm_backend:
            self._template_and_add_vars_to_hosts(JSON_GLANCE_LVM_ALL_GROUP_VARS, hosts=self.hosts)

    def _add_ceph_ansible_all_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_ALL_HOST_VARS, hosts=self.hosts)

    def _add_ceph_ansible_mons_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(
            JSON_CEPH_ANSIBLE_MONS_HOST_VARS,
            **self._get_ceph_vars())

    def _get_ceph_vars(self):
        return {
            'osd_pool_images_pg_num': self._calculated_images_pg_num,
            'osd_pool_volumes_pg_num': self._calculated_volumes_pg_num,
            'osd_pool_vms_pg_num': self._calculated_vms_pg_num,
            'osd_pool_shared_pg_num': self._calculated_shared_pg_num,
            'osd_pool_caas_pg_num': self._calculated_caas_pg_num,
            'is_openstack_deployment': self._is_openstack_deployment,
            'is_caas_deployment': self._is_caas_deployment,
            'is_hybrid_deployment': self._is_hybrid_deployment,
            'nova_pool': self._nova_pool_name,
            'glance_pool': self._glance_pool_name,
            'cinder_pool': self._cinder_pool_name,
            'platform_pool': self._platform_pool_name
        }

    def _add_ceph_ansible_osds_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_OSDS_HOST_VARS, hosts=self.hosts)

        if self.is_external_ceph_backend:
            nova_ceph_client = self._storage_config_handler.get_ext_ceph_ceph_user()
        else:
            nova_ceph_client = 'cinder'

        self._template_and_add_vars_to_hosts(
            JSON_NOVA_RBD_HOST_VAR, hosts=self.compute_hosts,
            nova_pool_name=self._nova_pool_name,
            nova_ceph_client=nova_ceph_client)

    def _add_single_controller_host_var(self):
        self._template_and_add_vars_to_hosts(
            JSON_SINGLE_CONTROLLER_VAR, hosts=self.controller_hosts)

    def _add_global_parameters(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_global_var(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_host_group(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_host_group(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    @property
    def cluster_network_cidrs(self):
        cidrs = []
        network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        for domain in self._networking_config_handler.get_network_domains(network):
            cidrs.append(self._networking_config_handler.get_network_cidr(network, domain))
        return ','.join(cidrs)

    @property
    def public_network_cidrs(self):
        cidrs = set()
        cluster_network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        public_network = self._networking_config_handler.get_infra_internal_network_name()
        for domain in self._networking_config_handler.get_network_domains(cluster_network):
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        for host in self._mon_hosts:
            domain = self._hosts_config_handler.get_host_network_domain(host.name)
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        return ','.join(cidrs)

    def _add_networks(self):
        self._add_global_parameters(
            Environment().from_string(JSON_NETWORK).render(
                public_networks=self.public_network_cidrs,
                cluster_networks=self.cluster_network_cidrs))

    def _add_monitor_address(self):
        infra_storage_network = self._networking_config_handler.get_infra_internal_network_name()
        for host in self._mon_hosts:
            monitor_address = \
                self._networking_config_handler.get_host_ip(host.name, infra_storage_network)
            self.add_host_var(host.name, "monitor_address", monitor_address)

    def _add_override_settings(self):
        ceph_osd_pool_size = self._storage_config_handler.get_ceph_osd_pool_size()

        if self._is_collocated_3controllers_config():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_3CONTROLLERS).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())

        elif self._is_controller_has_compute():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_CACHE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())
        else:
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

    def _calculate_pg_num(self, pool_data_percentage):
        pgnum = PGNum(self._total_number_of_osds,
                      pool_data_percentage,
                      self._number_of_replicas)
        return pgnum.calculate()

    @property
    def _calculated_default_pg_num(self):
        return self._calculate_pg_num(self._pool_data_percentage)

    @property
    def _calculated_volumes_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_images_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_IMAGES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_vms_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VMS_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_shared_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_SHARED_PG_NUM_PERCENTAGE)

    @property
    def _calculated_caas_pg_num(self):
        if self._ceph_caas_pg_proportion > 0:
            return self._calculate_pg_num(
                (OSD_POOL_CAAS_PG_NUM_PERCENTAGE - OSD_POOL_SHARED_PG_NUM_PERCENTAGE) *
                self._ceph_caas_pg_proportion)

    def _add_osd_pool_pg_nums(self):
        self._add_global_parameters(
            Environment().from_string(JSON_OSD_POOL_PGNUMS).render(**self._get_ceph_vars()))

    @property
    def _installation_host(self):
        return self._hosts_config_handler.get_installation_host()

    @property
    def _infra_internal_network_name(self):
        return self._networking_config_handler.get_infra_internal_network_name()

    @property
    def _installation_host_ip(self):
        return self._networking_config_handler.get_host_ip(
            self._installation_host, self._infra_internal_network_name)

    @property
    def is_ceph_backend(self):
        return self._storage_config_handler.is_ceph_enabled()

    @property
    def is_external_ceph_backend(self):
        return (self._storage_config_handler.is_external_ceph_enabled() and
                self._ceph_is_openstack_storage_backend)

    def _set_external_ceph_pool_names(self):
        if self.is_external_ceph_backend:
            h = self._storage_config_handler
            self._nova_pool_name = h.get_ext_ceph_nova_pool()
            self._cinder_pool_name = h.get_ext_ceph_cinder_pool()
            self._glance_pool_name = h.get_ext_ceph_glance_pool()
            self._platform_pool_name = h.get_ext_ceph_platform_pool()

    @property
    def _lvm_is_openstack_storage_backend(self):
        return True if self._openstack_config_handler.get_storage_backend() == 'lvm' else False

    @property
    def _ceph_is_openstack_storage_backend(self):
        return True if self._openstack_config_handler.get_storage_backend() == 'ceph' else False

    @property
    def is_lvm_backend(self):
        return (self._storage_config_handler.is_lvm_enabled() and
                self._lvm_is_openstack_storage_backend)

    @property
    def instance_default_backend(self):
        return self._openstack_config_handler.get_instance_default_backend()

    @property
    def _hosts_with_ceph_storage_profile(self):
        # return filter(lambda host: host.is_rbd, self.hosts)
        return [host for host in self.hosts if host.is_rbd_ceph]

    @property
    def _is_openstack_deployment(self):
        return self._caas_config_handler.is_openstack_deployment()

    @property
    def _is_caas_deployment(self):
        return self._caas_config_handler.is_caas_deployment()

    @property
    def _is_hybrid_deployment(self):
        return self._caas_config_handler.is_hybrid_deployment()

    def handle(self, phase):
        self._init_jinja_environment()
        self.add_global_var("external_ceph_configured", self.is_external_ceph_backend)
        self.add_global_var("ceph_configured", self.is_ceph_backend)
        self.add_global_var("lvm_configured", self.is_lvm_backend)
        if phase == 'bootstrapping':
            self._add_hdd_storage_configs()

        self._add_hdd_storage_configs()
        if self.is_external_ceph_backend:
            self._set_external_ceph_pool_names()
            self._add_external_ceph_cinder_backends()

        if self._is_openstack_deployment:
            self._add_cinder_backends()

        ceph_hosts = self._hosts_with_ceph_storage_profile

        self._set_ceph_pg_proportions(ceph_hosts)
        self._add_ceph_ansible_all_sample_host_vars()
        self._add_ceph_ansible_mons_sample_host_vars()
        self._add_ceph_ansible_osds_sample_host_vars()
        self._add_ceph_hosts()
        self._add_storage_nodes_configs()
        self._add_monitor_address()
        self._add_override_settings()
        self._add_osd_pool_pg_nums()
        self._add_networks()
        self.add_global_var("cinder_ceph_client_uuid", self._read_cinder_ceph_client_uuid())
        if self.is_lvm_backend:
            self._add_lvm_storage_configs()
        self._add_bare_lvm_storage_configs()

        self.add_global_var("instance_default_backend", self.instance_default_backend)
        self.add_global_var("storage_single_node_config", self.single_node_config)
        self.add_global_var("one_controller_node_config", self._is_one_controller_node_config)
        if self._is_one_controller_node_config:
            self._add_single_controller_host_var()
        self.add_global_var("collocated_controller_node_config",
                            self._is_collocated_controller_node_config())
        self.add_global_var("dedicated_storage_node_config",
                            self._is_dedicated_storage_config())
        self.add_global_var("storage_one_controller_multi_nodes_config",
                            self._is_one_controller_multi_nodes_config)
        if self.instance_default_backend == 'rbd':
        elif self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
            self._add_instance_devices()

    def _set_ceph_pg_proportions(self, ceph_hosts):
        # FIXME: The first storage host's storage profile is assumed to provide the pg proportion values
        hostname = ceph_hosts[0].name
        if self._is_hybrid_deployment:
            self._ceph_openstack_pg_proportion = self._get_ceph_openstack_pg_proportion(hostname)
            self._ceph_caas_pg_proportion = self._get_ceph_caas_pg_proportion(hostname)
        elif self._is_openstack_deployment:
            self._ceph_openstack_pg_proportion = 1.0
            self._ceph_caas_pg_proportion = 0.0
        elif self._is_caas_deployment:
            self._ceph_openstack_pg_proportion = 0.0
            self._ceph_caas_pg_proportion = 1.0

    def _init_host_data(self):
        hosts = self._hosts_config_handler.get_enabled_hosts()
        self.single_node_config = True if len(hosts) == 1 else False
        for name in hosts:
            host = self._initialize_host_object(name)
            self.hosts.append(host)
            if host.is_osd:
                self._osd_hosts.append(host)
            if host.is_mon:
                self._mon_hosts.append(host)
            if host.is_mgr:
                self._mgr_hosts.append(host)

        for host in self.hosts:
            if host.is_compute:
                self.compute_hosts.append(host)
            if host.is_controller:
                self.controller_hosts.append(host)
            if host.is_storage:
                self.storage_hosts.append(host)

    @property
    def _number_of_osd_hosts(self):
        return len(self._osd_hosts)

    @property
    def _is_one_controller_multi_nodes_config(self):
        if len(self.controller_hosts) == 1 and not self.single_node_config:
            return True
        return False

    @property
    def _is_one_controller_node_config(self):
        if len(self.controller_hosts) == 1:
            return True
        return False

    @property
    def _number_of_osds_per_host(self):
        first_osd_host = self._osd_hosts[0].name
        return self._get_nr_of_ceph_osd_disks(first_osd_host)

    @property
    def _total_number_of_osds(self):
        return self._number_of_osds_per_host * self._number_of_osd_hosts

    @property
    def _number_of_pools(self):
        """TODO: Get dynamically"""
        return NUMBER_OF_POOLS

    @property
    def _pool_data_percentage(self):
        return float(1.0 / self._number_of_pools)

    @property
    def _number_of_replicas(self):
        num = self._storage_config_handler.get_ceph_osd_pool_size()
        return 2 if num == 0 else num

    def _init_jinja_environment(self):
        self._init_host_data()

    def _is_backend_configured(self, backend, host_name):
        try:
            if self._get_storage_profile_for_backend(host_name, backend):
                return True
            return False
        except configerror.ConfigError:
            return False

    def _get_storage_profile_for_backend(self, host_name, *backends):
        storage_profiles = self._hosts_config_handler.get_storage_profiles(host_name)
        sp_handler = self._sp_config_handler
        for storage_profile in storage_profiles:
            if sp_handler.get_profile_backend(storage_profile) in backends:
                return storage_profile
        return None

    def _get_nr_of_ceph_osd_disks(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'nr_of_ceph_osd_disks')

    def _get_storage_profile_attribute(self, host_name, attribute):
        attribute_properties = self._storage_profile_attribute_properties[attribute]
        storage_profile = self._get_storage_profile_for_backend(host_name,
                                                                *attribute_properties['backends'])
        if storage_profile:
            return attribute_properties['getter'](storage_profile)
        raise cmerror.CMError(str("Failed to get %s" % attribute))

    def _get_ceph_openstack_pg_proportion(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'openstack_pg_proportion')

    def _get_ceph_caas_pg_proportion(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'caas_pg_proportion')

    def _get_lvm_instance_storage_partitions(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_storage_partitions')
        except configerror.ConfigError:
            pass

        if self.instance_default_backend not in ALL_DEFAULT_INSTANCE_BACKENDS:
            raise cmerror.CMError(
                str("Unknown instance_default_backend %s "
                    "not supported" % self.instance_default_backend))

    def _get_lvm_cinder_storage_partitions(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lvm_cinder_storage_partitions')

    def _get_bare_lvm_mount_options(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_options')

    def _get_bare_lvm_mount_dir(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_dir')

    def _get_bare_lvm_lv_name(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lv_name')

    def _get_instance_lv_percentage(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_cow_lv_storage_percentage')
        except configerror.ConfigError:
            return DEFAULT_INSTANCE_LV_PERCENTAGE
        raise cmerror.CMError(str("Failed to find lvm from storage_profiles"))

    def _is_osd_host(self, name):
        try:
            return bool(name in self._hosts_config_handler.get_service_profile_hosts('storage'))
        except configerror.ConfigError:
            return False

    def _is_rbd_ceph_configured(self, host_name):
        return self._is_backend_configured('ceph', host_name)

    def _is_lvm_configured(self, host_name):
        return self._is_backend_configured('lvm', host_name)

    def _is_bare_lvm_configured(self, host_name):
        return self._is_backend_configured('bare_lvm', host_name)

    def _get_hw_type(self, name):
        hwmgmt_addr = self._hosts_config_handler.get_hwmgmt_ip(name)
        hwmgmt_user = self._hosts_config_handler.get_hwmgmt_user(name)
        hwmgmt_pass = self._hosts_config_handler.get_hwmgmt_password(name)
        hwmgmt_priv_level = self._hosts_config_handler.get_hwmgmt_priv_level(name)
        return hw.get_hw_type(hwmgmt_addr, hwmgmt_user, hwmgmt_pass, hwmgmt_priv_level)

    @staticmethod
    def _get_os_disk(hw_type):
        return hw.get_os_hd(hw_type)

    def _get_osd_disks_for_embedded_deployment(self, host_name):
        return self._hosts_config_handler.get_ceph_osd_disks(host_name)

    @staticmethod
    def _get_osd_disks(hw_type):
        return hw.get_hd_with_usage(hw_type, "osd")

    def _by_path_disks(self, hw_type, nr_of_disks):
        return self._get_osd_disks(hw_type)[0:nr_of_disks]

    @staticmethod
    def _is_by_path_disks(disk_list):
        return [disk for disk in disk_list if "by-path" in disk]

    def _get_physical_volumes(self, disk_list):
        if self._is_by_path_disks(disk_list):
            return [disk+"-part"+partition_nr for disk in disk_list]
        return [disk+partition_nr for disk in disk_list]

    def _initialize_host_object(self, name):
        host.is_mgr = self._is_host_managment(host.name)
        host.is_controller = self._is_host_controller(host.name)
        host.is_compute = self._is_host_compute(host.name)
        host.is_storage = self._is_host_storage(host.name)
        host.is_rbd_ceph = self._is_rbd_ceph_configured(host.name)
        host.is_lvm = self._is_lvm_configured(host.name)
        host.is_bare_lvm = self._is_bare_lvm_configured(host.name)
        host.is_osd = self._is_osd_host(host.name)
        host.is_mon = host.is_mgr
        hw_type = self._get_hw_type(name)
        host.os_disk = self._get_os_disk(hw_type)
        if host.is_bare_lvm:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.bare_lvm_disks = self._by_path_disks(hw_type, len(partitions))
            host.bare_lvm_physical_volumes = self._get_physical_volumes(host.bare_lvm_disks)
            host.mount_options = self._get_bare_lvm_mount_options(host.name)
            host.mount_dir = self._get_bare_lvm_mount_dir(host.name)
            host.bare_lvm_lv_name = self._get_bare_lvm_lv_name(host.name)

        if host.is_compute and self.instance_default_backend != 'rbd':
            host.vg_percentage = INSTANCE_NODE_VG_PERCENTAGE

        if self.is_lvm_backend and host.is_controller:
            nr_of_cinder_disks = int(len(self._get_lvm_cinder_storage_partitions(host.name)))
            nr_of_nova_disks = int(len(self._get_lvm_instance_storage_partitions(host.name)))
            nr_of_all_disks = nr_of_cinder_disks + nr_of_nova_disks
            if nr_of_nova_disks > 0:
                host.cinder_disks = \
                    self._by_path_disks(hw_type, nr_of_all_disks)[-nr_of_cinder_disks:]
            else:
                host.cinder_disks = self._by_path_disks(hw_type, nr_of_cinder_disks)
            host.cinder_physical_volumes = self._get_physical_volumes(host.cinder_disks)

        if host.is_rbd_ceph:
            nr_of_osd_disks = self._get_nr_of_ceph_osd_disks(host.name)
            if self._caas_config_handler.is_vnf_embedded_deployment():
                host.ceph_osd_disks = \
                    self._get_osd_disks_for_embedded_deployment(host.name)[0:nr_of_osd_disks]
            else:
                host.ceph_osd_disks = self._get_osd_disks(hw_type)[0:nr_of_osd_disks]
            host.osd_disks_ids = range(1, nr_of_osd_disks+1)

        if host.is_lvm and host.is_compute:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.instance_disks = self._by_path_disks(hw_type, len(partitions))
            host.instance_physical_volumes = self._get_physical_volumes(host.instance_disks)
            host.instance_lv_percentage = self._get_instance_lv_percentage(host.name)
        return host