# Copyright 2019 Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=missing-docstring,invalid-name,too-few-public-methods,too-many-instance-attributes,too-many-lines
import os
import json
from jinja2 import Environment
from cmframework.apis import cmansibleinventoryconfig
from cmframework.apis import cmerror
from cmdatahandlers.api import configerror
from serviceprofiles import profiles
import hw_detector.hw_detect_lib as hw


import math

NEAREST_POWER_OF_2_PERCENTAGE = 0.25

TARGET_PGS_PER_OSD_NO_INCREASE_EXPECTED = 100
TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED = 200
TARGET_PGS_PER_OSD_TWO_TO_THREE_TIMES_SIZE_INCREASE_EXPECTED = 300
# Please visit ceph.com/pgcalc for details on the values above

MINIMUM_PG_NUM = 32


class PGNum(object):
    """Calculates the pg_num for the given attributes."""

    def __init__(self, number_of_pool_osds, pool_data_percentage, number_of_replicas):
        self._number_of_pool_osds = number_of_pool_osds
        self._pool_data_percentage = pool_data_percentage
        self._number_of_replicas = number_of_replicas

    @staticmethod
    def _round_up_to_closest_power_of_2(num):
        """Smallest power of 2 greater than or equal to num."""
        return 2**(num-1).bit_length() if num > 0 else 1

    @staticmethod
    def _round_down_to_closest_power_of_2(num):
        """Largest power of 2 less than or equal to num."""
        return 2**(num.bit_length()-1) if num > 0 else 1

    @staticmethod
    def _check_percentage_of_values(diff_to_lower, org_pgnum):
        """ If the nearest power of 2 is more than 25% below the original value,
        the next higher power of 2 is used. Please visit ceph.com/pgcalc
        """
        return float(float(diff_to_lower) / float(org_pgnum)) > NEAREST_POWER_OF_2_PERCENTAGE

    def _rounded_pgnum_to_the_nearest_power_of_2(self, pgnum):
        higher_power = self._round_up_to_closest_power_of_2(pgnum)
        lower_power = self._round_down_to_closest_power_of_2(pgnum)
        diff_to_lower = pgnum - lower_power
        if pgnum != 0 and self._check_percentage_of_values(diff_to_lower, pgnum):
            return higher_power
        return lower_power

    def _calculate_pg_num_formula(self, number_of_pool_osds, pool_percentage):
        return TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED \
               * number_of_pool_osds * float(pool_percentage) / self._number_of_replicas

    def _select_pgnum_formula_result(self, number_of_pool_osds, pool_percentage):
        pgnum = self._calculate_pg_num_formula(number_of_pool_osds, pool_percentage)
        return int(math.ceil(max(pgnum, MINIMUM_PG_NUM)))

    def calculate(self):
        """ The formula for the calculation can be found at ceph.com/pgcalc.

            pgnum = (target_pgs x number_of_osds_in_pool x pool_percentage) / number_of_replicas
            return: pgnum rounded to the nearest power of 2

        """
        pgnum = self._select_pgnum_formula_result(
            self._number_of_pool_osds, self._pool_data_percentage)
        return self._rounded_pgnum_to_the_nearest_power_of_2(pgnum)


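# Illustrative worked example (a minimal sketch, not part of the original
# module): with 9 OSDs in total, a pool holding 25% of the data and 3 replicas,
# the raw result is 200 * 9 * 0.25 / 3 = 150; the nearest lower power of 2 is
# 128 and (150 - 128) / 150 is below the 25% threshold, so 128 is chosen.
if __name__ == '__main__':
    # Only runs when this file is executed directly, never on plugin import.
    _example_pgnum = PGNum(number_of_pool_osds=9,
                           pool_data_percentage=0.25,
                           number_of_replicas=3)
    print(_example_pgnum.calculate())  # expected output: 128

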
NUMBER_OF_POOLS = 4
SUPPORTED_INSTANCE_BACKENDS = ['default', 'cow', 'lvm']
ALL_DEFAULT_INSTANCE_BACKENDS = SUPPORTED_INSTANCE_BACKENDS + ['rbd']

DEFAULT_INSTANCE_LV_PERCENTAGE = "100"

USER_SECRETS = "/etc/openstack_deploy/user_secrets.yml"

# Ceph PG share percentages for OpenStack pools
OSD_POOL_IMAGES_PG_NUM_PERCENTAGE = 0.09
OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE = 0.69
OSD_POOL_VMS_PG_NUM_PERCENTAGE = 0.20
OSD_POOL_SHARED_PG_NUM_PERCENTAGE = 0.02
# Ceph PG share percentages for CaaS pools
OSD_POOL_CAAS_PG_NUM_PERCENTAGE = 1.0

DEFAULT_ROOTDISK_DEVICE = "/dev/sda"
# Root disk partition 2 (system volume group) VG percentages
INSTANCE_NODE_VG_PERCENTAGE = 0.47
NOT_INSTANCE_NODE_VG_PERCENTAGE = 1
"""
/dev/sda1 fixed partition size: 50GiB fixed size = 10% of the total disk size
/dev/sda2 system VG partition size: 47% of the remaining disk size = 42% of the total disk size
/dev/sda3 instance partition size: 53% of the remaining disk size = 47% of the total disk size
"""
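# Illustrative arithmetic (assuming a 500 GiB root disk, so that the fixed
# 50 GiB /dev/sda1 is 10% of it): /dev/sda2 gets 0.47 * 450 GiB ~= 211 GiB for
# the system VG and /dev/sda3 the remaining ~239 GiB for instances.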


JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "ext_ceph_user": "{{ ext_ceph_user }}",
        "ext_ceph_user_key": "{{ ext_ceph_user_key }}",
        "cephkeys_access_group": "cephkeys",

        "ceph_mons": [
            {% for host in hosts %}
                "{{ host.name }}"
                {% if not loop.last %},{% endif %}
            {% endfor %}],

        "ext_ceph_fsid": "{{ ext_ceph_fsid }}",
        "ext_ceph_mon_hosts": "{{ ext_ceph_mon_hosts }}",

        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "RBD",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "{{ ext_ceph_user }}",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
        },

        "ext_openstack_pools": [
            "{{ glance_pool_name }}",
            "{{ cinder_pool_name }}",
            "{{ nova_pool_name }}",
            "{{ platform_pool_name }}"
        ],

        "cinder_ceph_client": "{{ ext_ceph_user }}",
        "nova_ceph_client": "{{ ext_ceph_user }}",

        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "glance_ceph_client": "{{ ext_ceph_user }}",
        "ceph_conf": "/etc/ceph/ceph.conf"

    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

JSON_CINDER_BACKENDS_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_controller %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            {% if openstack_storage == 'ceph' %}
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "volumes_hdd",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "cinder",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
            {% endif %}
            {% if openstack_storage == 'lvm' %}
            "lvm": {
                "iscsi_ip_address": "{{ installation_controller_ip }}",
                "volume_backend_name": "LVM_iSCSI",
                "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
                "volume_group": "cinder-volumes"
            }
            {% endif %}
        }
    }
    {% endif %}
    {% endfor %}
}
"""

JSON_STORAGE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_rbd_ceph %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
         "devices": [
             {% for disk in host.ceph_osd_disks %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}{% endfor %}]
    }
    {% endif %}
    {% endfor %}
}
"""

JSON_STORAGE_HOST_DISK_CONFIGURATION = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "by_path_disks":
             { "os" : "{{ host.os_disk }}",
               "osd" : "{{ host.ceph_osd_disks }}",
               "osd_disks_ids" : "{{ host.osd_disks_ids }}"
             },
         "rootdisk_vg_percentage": "{{ host.vg_percentage }}",
         "default_rootdisk_device": "{{ rootdisk_device }}"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "devices": [
             {% for disk in host.cinder_disks %}
             "{{disk}}"
             {%if not loop.last %},{% endif %}{% endfor %}],
         "cinder_physical_volumes": [
             {% for disk in host.cinder_physical_volumes %}
             "{{disk}}"
             {%if not loop.last %},{% endif %}{% endfor %}]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_BARE_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        {% if host.is_bare_lvm %}
        "bare_lvm": {
            "disks": [
                {% for disk in host.bare_lvm_disks %}
                    "{{disk}}"
                    {%if not loop.last %},{% endif %}{% endfor %}],
            "physical_volumes": [
                {% for disk in host.bare_lvm_physical_volumes %}
                    "{{disk}}"
                    {%if not loop.last %},{% endif %}{% endfor %}],
            "mount_options": "{{ host.mount_options }}",
            "mount_dir": "{{ host.mount_dir }}",
            "name": "{{ host.bare_lvm_lv_name }}"
        }
        {% endif %}
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

JSON_DEVICE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.instance_physical_volumes %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
         "instance_disks": [
             {% for disk in host.instance_disks %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}
             {% endfor %}],
         "instance_physical_volumes": [
             {% for disk in host.instance_physical_volumes %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}
             {% endfor %}],
         "instance_lv_percentage": "{{ host.instance_lv_percentage }}"
    }
    {% endif %}
    {% endfor %}
}
"""

# /etc/ansible/roles/os_nova/templates/nova.conf.j2
JSON_NOVA_RBD_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "nova_libvirt_images_rbd_pool": "{{ nova_pool_name }}",
         "nova_ceph_client": "{{ nova_ceph_client }}"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


#
# /opt/ceph-ansible/group_vars/osds.yml
JSON_OVERRIDE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_min_down_reporters": "1",
            "mon_osd_adjust_heartbeat_grace": "false",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""
JSON_OVERRIDE_CACHE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""
JSON_OVERRIDE_3CONTROLLERS = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_lease": "1.0",
            "mon_election_timeout": "2",
            "mon_lease_renew_interval_factor": "0.4",
            "mon_lease_ack_timeout_factor": "1.5",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""

JSON_NETWORK = """
{
    "public_network": "{{ public_networks }}",
    "cluster_network": "{{ cluster_networks }}"
}
"""

JSON_OS_TUNING = """
{
    "os_tuning_params": [{
        "name": "vm.min_free_kbytes",
        "value": "1048576"
    }]
}
"""

JSON_OSD_POOL_PGNUMS = """
{
    "osd_pool_images_pg_num": "{{ osd_pool_images_pg_num }}",
    "osd_pool_volumes_pg_num": "{{ osd_pool_volumes_pg_num }}",
    "osd_pool_vms_pg_num": "{{ osd_pool_vms_pg_num }}",
    "osd_pool_shared_pg_num": "{{ osd_pool_shared_pg_num }}"{%- if 0 < osd_pool_caas_pg_num %},
    "osd_pool_caas_pg_num": "{{ osd_pool_caas_pg_num }}"
{% endif %}
}
"""

JSON_CEPH_HOSTS = """
{
    "ceph-mon": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mon_hosts": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph_mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd_hosts": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "osds": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mgrs": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mgr": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ]
}
"""
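# Rendered, JSON_CEPH_HOSTS becomes plain host-group lists, e.g. (illustrative
# host names) {"ceph-mon": ["controller-1", "controller-2"], "ceph-osd": [...]}.
# _add_ceph_hosts() below feeds the same rendering to both the host-group and
# the global-variable handlers.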
#    "storage_backend": ceph


# Replaces variables in /opt/openstack-ansible/playbooks/inventory/group_vars/glance_all.yml
JSON_GLANCE_CEPH_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "ceph_conf": "/etc/ceph/ceph.conf"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

JSON_GLANCE_LVM_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "file"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

# ceph-ansible variables must be set at the host_vars level
# ceph-ansible sample variables are in group_vars
# group_vars - all.yml.sample
JSON_CEPH_ANSIBLE_ALL_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "mon_group_name": "mons",
         "osd_group_name": "osds",
         "mgr_group_name": "mgrs",
         "ceph_stable_release": "luminous",
         "generate_fsid": "true",
         "cephx": "true",
         "journal_size": "10240",
         "osd_objectstore": "bluestore"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

# pylint: disable=line-too-long
# ceph-ansible
# group_vars - mons.yml.sample
JSON_CEPH_ANSIBLE_MONS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "monitor_secret": "{{ '{{ monitor_keyring.stdout }}' }}",
         "openstack_config": true,
         "cephkeys_access_group": "cephkeys",
         "openstack_pools": [
             {
                 "name": "{{ platform_pool }}",
                 "pg_num": "{{ osd_pool_shared_pg_num }}",
                 "rule_name": ""
             }{% if is_openstack_deployment %},
             {
                 "name": "{{ glance_pool }}",
                 "pg_num": "{{ osd_pool_images_pg_num }}",
                 "rule_name": ""
             },
             {
                 "name": "{{ cinder_pool }}",
                 "pg_num": "{{ osd_pool_volumes_pg_num }}",
                 "rule_name": ""
             },
             {
                 "name": "{{ nova_pool }}",
                 "pg_num": "{{ osd_pool_vms_pg_num }}",
                 "rule_name": ""
             }
        {%- endif %}
        {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
             {
                 "name": "caas",
                 "pg_num": "{{ osd_pool_caas_pg_num }}",
                 "rule_name": ""
             }
        {%- endif %}
         ],
         "openstack_keys": [
             {
                 "acls": [],
                 "key": "$(ceph-authtool --gen-print-key)",
                 "mode": "0600",
                 "mon_cap": "allow r",
                 "name": "client.shared",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ platform_pool }}"
             }{% if is_openstack_deployment %},
             {
                 "acls": [],
                 "key": "$(ceph-authtool --gen-print-key)",
                 "mode": "0640",
                 "mon_cap": "allow r",
                 "name": "client.glance",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ glance_pool }}"
             },
             {
                 "acls": [],
                 "key": "$(ceph-authtool --gen-print-key)",
                 "mode": "0640",
                 "mon_cap": "allow r, allow command \\\\\\\\\\\\\\"osd blacklist\\\\\\\\\\\\\\"",
                 "name": "client.cinder",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ cinder_pool }}, allow rwx pool={{ nova_pool }}, allow rx pool={{ glance_pool }}"
             }
        {%- endif %}
        {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
             {
                 "acls": [],
                 "key": "$(ceph-authtool --gen-print-key)",
                 "mode": "0600",
                 "mon_cap": "allow r",
                 "name": "client.caas",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=caas"
             }
        {%- endif %}
        ]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
# pylint: enable=line-too-long

# ceph-ansible
# group_vars - osds.yml.sample
JSON_CEPH_ANSIBLE_OSDS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "raw_journal_devices": [],
         "journal_collocation": true,
         "raw_multi_journal": false,
         "dmcrytpt_journal_collocation": false,
         "dmcrypt_dedicated_journal": false,
         "osd_scenario": "collocated",
         "dedicated_devices": []
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""


JSON_SINGLE_CONTROLLER_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "single_controller_host": true
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

class Host(object):
    def __init__(self):
        self.name = None
        self.is_lvm = None
        self.is_osd = None
        self.is_mon = None
        self.is_mgr = None
        self.is_rbd_ceph = None
        self.ceph_osd_disks = []
        self.lvm_disks = []
        self.cinder_disks = []
        self.is_controller = False
        self.is_compute = False
        self.is_storage = False
        self.instance_physical_volumes = []
        self.cinder_physical_volumes = []
        self.instance_disks = []
        self.instance_lv_percentage = ""
        self.os_disk = ""
        self.osd_disks_ids = []
        self.vg_percentage = NOT_INSTANCE_NODE_VG_PERCENTAGE
        self.mount_dir = ""
        self.bare_lvm_disks = None
        self.is_bare_lvm = None
        self.bare_lvm_physical_volumes = None
        self.mount_options = None
        self.bare_lvm_lv_name = None


class storageinventory(cmansibleinventoryconfig.CMAnsibleInventoryConfigPlugin):

    def __init__(self, confman, inventory, ownhost):
        super(storageinventory, self).__init__(confman, inventory, ownhost)
        self.hosts = []
        self.storage_hosts = []
        self.compute_hosts = []
        self.controller_hosts = []
        self._mon_hosts = []
        self._osd_hosts = []
        self._mgr_hosts = []
        self.single_node_config = False
        self._networking_config_handler = self.confman.get_networking_config_handler()
        self._hosts_config_handler = self.confman.get_hosts_config_handler()
        self._storage_config_handler = self.confman.get_storage_config_handler()
        self._openstack_config_handler = self.confman.get_openstack_config_handler()
        self._sp_config_handler = self.confman.get_storage_profiles_config_handler()
        self._caas_config_handler = self.confman.get_caas_config_handler()
        self._ceph_caas_pg_proportion = 0.0
        self._ceph_openstack_pg_proportion = 0.0
        self._cinder_pool_name = 'volumes'
        self._glance_pool_name = 'images'
        self._nova_pool_name = 'vms'
        self._platform_pool_name = 'shared'
        self._storage_profile_attribute_properties = {
            'lvm_cinder_storage_partitions': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_cinder_storage_partitions
            },
            'mount_options': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_options
            },
            'mount_dir': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_dir
            },
            'lv_name': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_lv_name
            },
            'nr_of_ceph_osd_disks': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_nr_of_ceph_osd_disks
            },
            'lvm_instance_storage_partitions': {
                'backends': ['lvm', 'bare_lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_storage_partitions
            },
            'lvm_instance_cow_lv_storage_percentage': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_cow_lv_storage_percentage
            },
            'openstack_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_openstack_pg_proportion
            },
            'caas_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_caas_pg_proportion
            },
        }

    def _is_host_managment(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_management_service_profile(), host)

    def _is_host_controller(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_controller_service_profile(), host)

    def _is_profile_in_hosts_profiles(self, profile, host):
        node_service_profiles = self._hosts_config_handler.get_service_profiles(host)
        return profile in node_service_profiles

    def _is_host_compute(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_compute_service_profile(), host)

    def _is_host_caas_master(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_caasmaster_service_profile(), host)

    def _is_host_storage(self, host):
        return self._is_profile_in_hosts_profiles(profiles.Profiles.get_storage_service_profile(), host)

    def _is_controller_has_compute(self):
        if set.intersection(set(self.compute_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_controller_node_config(self):
        if set.intersection(set(self.storage_hosts), set(self.controller_hosts)):
            return True
        return False

    def _is_collocated_3controllers_config(self):
        if (self._is_collocated_controller_node_config() and
                (len(self.controller_hosts) == 3) and (len(self.hosts) == 3)):
            return True
        return False

    def _is_dedicated_storage_config(self):
        collocated_config = set.intersection(set(self.storage_hosts), set(self.controller_hosts))
        if collocated_config and (collocated_config == set(self.controller_hosts)):
            return False
        elif self.storage_hosts:
            return True
        else:
            return False

    def handle_bootstrapping(self):
        self.handle('bootstrapping')

    def handle_provisioning(self):
        self.handle('provisioning')

    def handle_postconfig(self):
        self.handle('postconfig')

    def handle_setup(self):
        pass

    def _template_and_add_vars_to_hosts(self, template, **variables):
        try:
            text = Environment().from_string(template).render(variables)
            if text:
                self._add_vars_for_hosts(text)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_vars_for_hosts(self, inventory_text):
        inventory = json.loads(inventory_text)
        for host in inventory.keys():
            for var, value in inventory[host].iteritems():
                self.add_host_var(host, var, value)

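    # Illustrative flow (assumed host names): rendering a template such as
    # JSON_NOVA_RBD_HOST_VAR for two compute hosts yields
    # {"compute-1": {...}, "compute-2": {...}}, and _add_vars_for_hosts() then
    # calls add_host_var() once per host/variable pair.
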
    @staticmethod
    def _read_cinder_ceph_client_uuid():
        if os.path.isfile(USER_SECRETS):
            d = dict(line.split(':', 1) for line in open(USER_SECRETS))
            cinder_ceph_client_uuid = d['cinder_ceph_client_uuid'].strip()
            return cinder_ceph_client_uuid
        else:
            raise cmerror.CMError("The file {} does not exist.".format(USER_SECRETS))

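    # The parser above assumes plain "key: value" lines in
    # /etc/openstack_deploy/user_secrets.yml, e.g. (illustrative value)
    # "cinder_ceph_client_uuid: 00000000-1111-2222-3333-444444444444".
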
    def _add_cinder_backends(self):
        self._template_and_add_vars_to_hosts(
            JSON_CINDER_BACKENDS_HOST_VAR,
            hosts=self.controller_hosts,
            installation_controller_ip=self._installation_host_ip,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            openstack_storage=self._openstack_config_handler.get_storage_backend(),
            cinder_pool_name=self._cinder_pool_name)

    def _add_external_ceph_cinder_backends(self):
        handler = self._storage_config_handler
        self._template_and_add_vars_to_hosts(
            JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR,
            hosts=self.hosts,
            cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
            ext_ceph_user=handler.get_ext_ceph_ceph_user(),
            ext_ceph_user_key=handler.get_ext_ceph_ceph_user_key(),
            ext_ceph_fsid=handler.get_ext_ceph_fsid(),
            ext_ceph_mon_hosts=", ".join(handler.get_ext_ceph_mon_hosts()),
            nova_pool_name=self._nova_pool_name,
            glance_pool_name=self._glance_pool_name,
            cinder_pool_name=self._cinder_pool_name,
            platform_pool_name=self._platform_pool_name)

    def _add_storage_nodes_configs(self):
        rbdhosts = []
        for host in self.hosts:
            if host.is_rbd_ceph:
                rbdhosts.append(host)
        self._template_and_add_vars_to_hosts(JSON_STORAGE_HOST_VAR, hosts=rbdhosts)

    def _add_hdd_storage_configs(self):
        self._template_and_add_vars_to_hosts(
            JSON_STORAGE_HOST_DISK_CONFIGURATION,
            hosts=self.hosts,
            rootdisk_device=DEFAULT_ROOTDISK_DEVICE)

    def _add_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_bare_lvm_storage_configs(self):
        self._template_and_add_vars_to_hosts(JSON_BARE_LVM_STORAGE_HOST_VAR, hosts=self.hosts)

    def _add_instance_devices(self):
        self._template_and_add_vars_to_hosts(JSON_DEVICE_HOST_VAR, hosts=self.compute_hosts)

    def _add_ceph_hosts(self):
        self._add_host_group(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

        self._add_global_parameters(
            Environment().from_string(JSON_CEPH_HOSTS).render(
                mons=self._mon_hosts,
                osds=self._osd_hosts,
                mgrs=self._mgr_hosts))

    def _add_glance(self):
        if self.is_ceph_backend:
            self._template_and_add_vars_to_hosts(
                JSON_GLANCE_CEPH_ALL_GROUP_VARS,
                hosts=self.hosts,
                glance_pool_name=self._glance_pool_name)
        elif self.is_lvm_backend:
            self._template_and_add_vars_to_hosts(JSON_GLANCE_LVM_ALL_GROUP_VARS, hosts=self.hosts)

    def _add_ceph_ansible_all_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_ALL_HOST_VARS, hosts=self.hosts)

    def _add_ceph_ansible_mons_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(
            JSON_CEPH_ANSIBLE_MONS_HOST_VARS,
            hosts=self.hosts,
            **self._get_ceph_vars())

    def _get_ceph_vars(self):
        return {
            'osd_pool_images_pg_num':  self._calculated_images_pg_num,
            'osd_pool_volumes_pg_num': self._calculated_volumes_pg_num,
            'osd_pool_vms_pg_num':     self._calculated_vms_pg_num,
            'osd_pool_shared_pg_num':  self._calculated_shared_pg_num,
            'osd_pool_caas_pg_num':    self._calculated_caas_pg_num,
            'is_openstack_deployment': self._is_openstack_deployment,
            'is_caas_deployment':      self._is_caas_deployment,
            'is_hybrid_deployment':    self._is_hybrid_deployment,
            'nova_pool':               self._nova_pool_name,
            'glance_pool':             self._glance_pool_name,
            'cinder_pool':             self._cinder_pool_name,
            'platform_pool':           self._platform_pool_name
        }

    def _add_ceph_ansible_osds_sample_host_vars(self):
        self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_OSDS_HOST_VARS, hosts=self.hosts)

    def _add_nova(self):
        if self.is_external_ceph_backend:
            nova_ceph_client = self._storage_config_handler.get_ext_ceph_ceph_user()
        else:
            nova_ceph_client = 'cinder'

        self._template_and_add_vars_to_hosts(
            JSON_NOVA_RBD_HOST_VAR, hosts=self.compute_hosts,
            nova_pool_name=self._nova_pool_name,
            nova_ceph_client=nova_ceph_client)

    def _add_single_controller_host_var(self):
        self._template_and_add_vars_to_hosts(
            JSON_SINGLE_CONTROLLER_VAR, hosts=self.controller_hosts)

    def _add_global_parameters(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_global_var(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    def _add_host_group(self, text):
        try:
            inventory = json.loads(text)
            for var, value in inventory.iteritems():
                self.add_host_group(var, value)
        except Exception as exp:
            raise cmerror.CMError(str(exp))

    @property
    def cluster_network_cidrs(self):
        cidrs = []
        network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        for domain in self._networking_config_handler.get_network_domains(network):
            cidrs.append(self._networking_config_handler.get_network_cidr(network, domain))
        return ','.join(cidrs)

    @property
    def public_network_cidrs(self):
        cidrs = set()
        cluster_network = self._networking_config_handler.get_infra_storage_cluster_network_name()
        public_network = self._networking_config_handler.get_infra_internal_network_name()
        for domain in self._networking_config_handler.get_network_domains(cluster_network):
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        for host in self._mon_hosts:
            domain = self._hosts_config_handler.get_host_network_domain(host.name)
            cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
        return ','.join(cidrs)

    def _add_networks(self):
        self._add_global_parameters(
            Environment().from_string(JSON_NETWORK).render(
                public_networks=self.public_network_cidrs,
                cluster_networks=self.cluster_network_cidrs))

    def _add_monitor_address(self):
        infra_storage_network = self._networking_config_handler.get_infra_internal_network_name()
        for host in self._mon_hosts:
            monitor_address = \
                self._networking_config_handler.get_host_ip(host.name, infra_storage_network)
            self.add_host_var(host.name, "monitor_address", monitor_address)

    def _add_override_settings(self):
        ceph_osd_pool_size = self._storage_config_handler.get_ceph_osd_pool_size()

        if self._is_collocated_3controllers_config():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_3CONTROLLERS).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())

        elif self._is_controller_has_compute():
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE_CACHE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

            self._add_global_parameters(
                Environment().from_string(JSON_OS_TUNING).render())
        else:
            self._add_global_parameters(
                Environment().from_string(JSON_OVERRIDE).render(
                    osd_pool_default_size=ceph_osd_pool_size,
                    osd_pool_default_min_size=str(ceph_osd_pool_size-1),
                    osd_pool_default_pg_num=self._calculated_default_pg_num))

    def _calculate_pg_num(self, pool_data_percentage):
        pgnum = PGNum(self._total_number_of_osds,
                      pool_data_percentage,
                      self._number_of_replicas)
        return pgnum.calculate()

    @property
    def _calculated_default_pg_num(self):
        return self._calculate_pg_num(self._pool_data_percentage)

    @property
    def _calculated_volumes_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_images_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_IMAGES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_vms_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_VMS_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)

    @property
    def _calculated_shared_pg_num(self):
        return self._calculate_pg_num(
            OSD_POOL_SHARED_PG_NUM_PERCENTAGE)

    @property
    def _calculated_caas_pg_num(self):
        if self._ceph_caas_pg_proportion > 0:
            return self._calculate_pg_num(
                (OSD_POOL_CAAS_PG_NUM_PERCENTAGE - OSD_POOL_SHARED_PG_NUM_PERCENTAGE) *
                self._ceph_caas_pg_proportion)
        return 0

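    # Illustrative reading of the _calculated_*_pg_num properties above (not
    # from the original source): in a hybrid deployment with a
    # caas_pg_proportion of 0.4, the caas pool is sized from
    # (1.0 - 0.02) * 0.4 = 0.392 of the data, while the OpenStack pool shares
    # defined at module level are scaled by the openstack proportion.
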
    def _add_osd_pool_pg_nums(self):
        self._add_global_parameters(
            Environment().from_string(JSON_OSD_POOL_PGNUMS).render(**self._get_ceph_vars()))

    @property
    def _installation_host(self):
        return self._hosts_config_handler.get_installation_host()

    @property
    def _infra_internal_network_name(self):
        return self._networking_config_handler.get_infra_internal_network_name()

    @property
    def _installation_host_ip(self):
        return self._networking_config_handler.get_host_ip(
            self._installation_host, self._infra_internal_network_name)

    @property
    def is_ceph_backend(self):
        return self._storage_config_handler.is_ceph_enabled()

    @property
    def is_external_ceph_backend(self):
        return (self._storage_config_handler.is_external_ceph_enabled() and
                self._ceph_is_openstack_storage_backend)

    def _set_external_ceph_pool_names(self):
        if self.is_external_ceph_backend:
            h = self._storage_config_handler
            self._nova_pool_name = h.get_ext_ceph_nova_pool()
            self._cinder_pool_name = h.get_ext_ceph_cinder_pool()
            self._glance_pool_name = h.get_ext_ceph_glance_pool()
            self._platform_pool_name = h.get_ext_ceph_platform_pool()

    @property
    def _lvm_is_openstack_storage_backend(self):
        return True if self._openstack_config_handler.get_storage_backend() == 'lvm' else False

    @property
    def _ceph_is_openstack_storage_backend(self):
        return True if self._openstack_config_handler.get_storage_backend() == 'ceph' else False

    @property
    def is_lvm_backend(self):
        return (self._storage_config_handler.is_lvm_enabled() and
                self._lvm_is_openstack_storage_backend)

    @property
    def instance_default_backend(self):
        return self._openstack_config_handler.get_instance_default_backend()

    @property
    def _hosts_with_ceph_storage_profile(self):
        # return filter(lambda host: host.is_rbd, self.hosts)
        return [host for host in self.hosts if host.is_rbd_ceph]

    @property
    def _is_openstack_deployment(self):
        return self._caas_config_handler.is_openstack_deployment()

    @property
    def _is_caas_deployment(self):
        return self._caas_config_handler.is_caas_deployment()

    @property
    def _is_hybrid_deployment(self):
        return self._caas_config_handler.is_hybrid_deployment()

    def handle(self, phase):
        self._init_jinja_environment()
        self.add_global_var("external_ceph_configured", self.is_external_ceph_backend)
        self.add_global_var("ceph_configured", self.is_ceph_backend)
        self.add_global_var("lvm_configured", self.is_lvm_backend)
        if phase == 'bootstrapping':
            self._add_hdd_storage_configs()
        else:
            self._add_hdd_storage_configs()
            if self.is_external_ceph_backend:
                self._set_external_ceph_pool_names()
                self._add_external_ceph_cinder_backends()
            else:
                if self._is_openstack_deployment:
                    self._add_cinder_backends()
                    self._add_glance()

            ceph_hosts = self._hosts_with_ceph_storage_profile
            if ceph_hosts:
                self._set_ceph_pg_proportions(ceph_hosts)
                self._add_ceph_ansible_all_sample_host_vars()
                self._add_ceph_ansible_mons_sample_host_vars()
                self._add_ceph_ansible_osds_sample_host_vars()
                self._add_ceph_hosts()
                self._add_storage_nodes_configs()
                self._add_monitor_address()
                self._add_override_settings()
                self._add_osd_pool_pg_nums()
                self._add_networks()
                self.add_global_var("cinder_ceph_client_uuid", self._read_cinder_ceph_client_uuid())
            if self.is_lvm_backend:
                self._add_lvm_storage_configs()
            self._add_bare_lvm_storage_configs()

            self.add_global_var("instance_default_backend", self.instance_default_backend)
            self.add_global_var("storage_single_node_config", self.single_node_config)
            self.add_global_var("one_controller_node_config", self._is_one_controller_node_config)
            if self._is_one_controller_node_config:
                self._add_single_controller_host_var()
            self.add_global_var("collocated_controller_node_config",
                                self._is_collocated_controller_node_config())
            self.add_global_var("dedicated_storage_node_config",
                                self._is_dedicated_storage_config())
            self.add_global_var("storage_one_controller_multi_nodes_config",
                                self._is_one_controller_multi_nodes_config)
            if self.instance_default_backend == 'rbd':
                self._add_nova()
            elif self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                self._add_instance_devices()

    def _set_ceph_pg_proportions(self, ceph_hosts):
        # FIXME: the first storage host's storage profile is assumed to provide the pg proportion values
        hostname = ceph_hosts[0].name
        if self._is_hybrid_deployment:
            self._ceph_openstack_pg_proportion = self._get_ceph_openstack_pg_proportion(hostname)
            self._ceph_caas_pg_proportion = self._get_ceph_caas_pg_proportion(hostname)
        elif self._is_openstack_deployment:
            self._ceph_openstack_pg_proportion = 1.0
            self._ceph_caas_pg_proportion = 0.0
        elif self._is_caas_deployment:
            self._ceph_openstack_pg_proportion = 0.0
            self._ceph_caas_pg_proportion = 1.0

    def _init_host_data(self):
        hosts = self._hosts_config_handler.get_enabled_hosts()
        self.single_node_config = True if len(hosts) == 1 else False
        for name in hosts:
            host = self._initialize_host_object(name)
            self.hosts.append(host)
            if host.is_osd:
                self._osd_hosts.append(host)
            if host.is_mon:
                self._mon_hosts.append(host)
            if host.is_mgr:
                self._mgr_hosts.append(host)

        for host in self.hosts:
            if host.is_compute:
                self.compute_hosts.append(host)
            if host.is_controller:
                self.controller_hosts.append(host)
            if host.is_storage:
                self.storage_hosts.append(host)

    @property
    def _number_of_osd_hosts(self):
        return len(self._osd_hosts)

    @property
    def _is_one_controller_multi_nodes_config(self):
        if len(self.controller_hosts) == 1 and not self.single_node_config:
            return True
        return False

    @property
    def _is_one_controller_node_config(self):
        if len(self.controller_hosts) == 1:
            return True
        return False

    @property
    def _number_of_osds_per_host(self):
        first_osd_host = self._osd_hosts[0].name
        return self._get_nr_of_ceph_osd_disks(first_osd_host)

    @property
    def _total_number_of_osds(self):
        return self._number_of_osds_per_host * self._number_of_osd_hosts

    @property
    def _number_of_pools(self):
        """TODO: Get dynamically"""
        return NUMBER_OF_POOLS

    @property
    def _pool_data_percentage(self):
        return float(1.0 / self._number_of_pools)

    @property
    def _number_of_replicas(self):
        num = self._storage_config_handler.get_ceph_osd_pool_size()
        return 2 if num == 0 else num

    def _init_jinja_environment(self):
        self._init_host_data()

    def _is_backend_configured(self, backend, host_name):
        try:
            if self._get_storage_profile_for_backend(host_name, backend):
                return True
            return False
        except configerror.ConfigError:
            return False

    def _get_storage_profile_for_backend(self, host_name, *backends):
        storage_profiles = self._hosts_config_handler.get_storage_profiles(host_name)
        sp_handler = self._sp_config_handler
        for storage_profile in storage_profiles:
            if sp_handler.get_profile_backend(storage_profile) in backends:
                return storage_profile
        return None

    def _get_nr_of_ceph_osd_disks(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'nr_of_ceph_osd_disks')

    def _get_storage_profile_attribute(self, host_name, attribute):
        attribute_properties = self._storage_profile_attribute_properties[attribute]
        storage_profile = self._get_storage_profile_for_backend(host_name,
                                                                *attribute_properties['backends'])
        if storage_profile:
            return attribute_properties['getter'](storage_profile)
        raise cmerror.CMError(str("Failed to get %s" % attribute))

    def _get_ceph_openstack_pg_proportion(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'openstack_pg_proportion')

    def _get_ceph_caas_pg_proportion(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'caas_pg_proportion')

    def _get_lvm_instance_storage_partitions(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_storage_partitions')
        except configerror.ConfigError:
            pass

        if self.instance_default_backend not in ALL_DEFAULT_INSTANCE_BACKENDS:
            raise cmerror.CMError(
                str("Unknown instance_default_backend %s "
                    "not supported" % self.instance_default_backend))
        return []

    def _get_lvm_cinder_storage_partitions(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lvm_cinder_storage_partitions')

    def _get_bare_lvm_mount_options(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_options')

    def _get_bare_lvm_mount_dir(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'mount_dir')

    def _get_bare_lvm_lv_name(self, host_name):
        return self._get_storage_profile_attribute(host_name, 'lv_name')

    def _get_instance_lv_percentage(self, host_name):
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_cow_lv_storage_percentage')
        except configerror.ConfigError:
            return DEFAULT_INSTANCE_LV_PERCENTAGE
        raise cmerror.CMError(str("Failed to find lvm in storage_profiles"))

    def _is_osd_host(self, name):
        try:
            return bool(name in self._hosts_config_handler.get_service_profile_hosts('storage'))
        except configerror.ConfigError:
            return False

    def _is_rbd_ceph_configured(self, host_name):
        return self._is_backend_configured('ceph', host_name)

    def _is_lvm_configured(self, host_name):
        return self._is_backend_configured('lvm', host_name)

    def _is_bare_lvm_configured(self, host_name):
        return self._is_backend_configured('bare_lvm', host_name)

    def _get_hw_type(self, name):
        hwmgmt_addr = self._hosts_config_handler.get_hwmgmt_ip(name)
        hwmgmt_user = self._hosts_config_handler.get_hwmgmt_user(name)
        hwmgmt_pass = self._hosts_config_handler.get_hwmgmt_password(name)
        hwmgmt_priv_level = self._hosts_config_handler.get_hwmgmt_priv_level(name)
        return hw.get_hw_type(hwmgmt_addr, hwmgmt_user, hwmgmt_pass, hwmgmt_priv_level)

    @staticmethod
    def _get_os_disk(hw_type):
        return hw.get_os_hd(hw_type)

    def _get_osd_disks_for_embedded_deployment(self, host_name):
        return self._hosts_config_handler.get_ceph_osd_disks(host_name)

    @staticmethod
    def _get_osd_disks(hw_type):
        return hw.get_hd_with_usage(hw_type, "osd")

    def _by_path_disks(self, hw_type, nr_of_disks):
        return self._get_osd_disks(hw_type)[0:nr_of_disks]

    @staticmethod
    def _is_by_path_disks(disk_list):
        return [disk for disk in disk_list if "by-path" in disk]

    def _get_physical_volumes(self, disk_list):
        partition_nr = "1"
        if self._is_by_path_disks(disk_list):
            return [disk+"-part"+partition_nr for disk in disk_list]
        else:
            return [disk+partition_nr for disk in disk_list]

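    # Illustrative example (hypothetical device names): for a by-path disk such
    # as "/dev/disk/by-path/pci-0000:00:1f.2-ata-1" the first physical volume
    # becomes ".../pci-0000:00:1f.2-ata-1-part1", whereas a plain "/dev/sdb"
    # simply becomes "/dev/sdb1".
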
    def _initialize_host_object(self, name):
        host = Host()
        host.name = name
        host.is_mgr = self._is_host_managment(host.name)
        host.is_controller = self._is_host_controller(host.name)
        host.is_compute = self._is_host_compute(host.name)
        host.is_storage = self._is_host_storage(host.name)
        host.is_rbd_ceph = self._is_rbd_ceph_configured(host.name)
        host.is_lvm = self._is_lvm_configured(host.name)
        host.is_bare_lvm = self._is_bare_lvm_configured(host.name)
        host.is_osd = self._is_osd_host(host.name)
        host.is_mon = host.is_mgr
        hw_type = self._get_hw_type(name)
        host.os_disk = self._get_os_disk(hw_type)
        if host.is_bare_lvm:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.bare_lvm_disks = self._by_path_disks(hw_type, len(partitions))
            host.bare_lvm_physical_volumes = self._get_physical_volumes(host.bare_lvm_disks)
            host.mount_options = self._get_bare_lvm_mount_options(host.name)
            host.mount_dir = self._get_bare_lvm_mount_dir(host.name)
            host.bare_lvm_lv_name = self._get_bare_lvm_lv_name(host.name)

        if host.is_compute and self.instance_default_backend != 'rbd':
            host.vg_percentage = INSTANCE_NODE_VG_PERCENTAGE

        if self.is_lvm_backend and host.is_controller:
            nr_of_cinder_disks = int(len(self._get_lvm_cinder_storage_partitions(host.name)))
            nr_of_nova_disks = int(len(self._get_lvm_instance_storage_partitions(host.name)))
            nr_of_all_disks = nr_of_cinder_disks + nr_of_nova_disks
            if nr_of_nova_disks > 0:
                host.cinder_disks = \
                    self._by_path_disks(hw_type, nr_of_all_disks)[-nr_of_cinder_disks:]
            else:
                host.cinder_disks = self._by_path_disks(hw_type, nr_of_cinder_disks)
            host.cinder_physical_volumes = self._get_physical_volumes(host.cinder_disks)

        if host.is_rbd_ceph:
            nr_of_osd_disks = self._get_nr_of_ceph_osd_disks(host.name)
            if self._caas_config_handler.is_vnf_embedded_deployment():
                host.ceph_osd_disks = \
                    self._get_osd_disks_for_embedded_deployment(host.name)[0:nr_of_osd_disks]
            else:
                host.ceph_osd_disks = self._get_osd_disks(hw_type)[0:nr_of_osd_disks]
            host.osd_disks_ids = range(1, nr_of_osd_disks+1)

        if host.is_lvm and host.is_compute:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.instance_disks = self._by_path_disks(hw_type, len(partitions))
            host.instance_physical_volumes = self._get_physical_volumes(host.instance_disks)
            host.instance_lv_percentage = self._get_instance_lv_percentage(host.name)
        return host