storageinventory: Refactor ceph key generation
[ta/cm-plugins.git] / inventoryhandlers / storageinventory / storageinventory.py
1 # Copyright 2019 Nokia
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 #    http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # pylint: disable=missing-docstring,invalid-name,too-few-public-methods,too-many-instance-attributes,too-many-lines
16 import os
17 import json
18 import subprocess
19 from jinja2 import Environment
20 from cmframework.apis import cmansibleinventoryconfig
21 from cmframework.apis import cmerror
22 from cmdatahandlers.api import configerror
23 from serviceprofiles import profiles
24 import hw_detector.hw_detect_lib as hw
25
26
27 import math
28
NEAREST_POWER_OF_2_PERCENTAGE = 0.25

TARGET_PGS_PER_OSD_NO_INCREASE_EXPECTED = 100
TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED = 200
TARGET_PGS_PER_OSD_TWO_TO_THREE_TIMES_SIZE_INCREASE_EXPECTED = 300
# Please visit ceph.com/pgcalc for details on previous values

MINIMUM_PG_NUM = 32


class PGNum(object):
    """Derives a Ceph pool pg_num from OSD count, data share and replica count.

    Formula and power-of-2 rounding rules follow ceph.com/pgcalc.
    """

    def __init__(self, number_of_pool_osds, pool_data_percentage, number_of_replicas):
        # All three inputs feed the pgcalc formula evaluated in calculate().
        self._number_of_pool_osds = number_of_pool_osds
        self._pool_data_percentage = pool_data_percentage
        self._number_of_replicas = number_of_replicas

    @staticmethod
    def _round_up_to_closest_power_of_2(num):
        """Smallest power of 2 >= num; 1 for non-positive input."""
        if num <= 0:
            return 1
        return 1 << (num - 1).bit_length()

    @staticmethod
    def _round_down_to_closest_power_of_2(num):
        """Largest power of 2 <= num; 1 for non-positive input."""
        if num <= 0:
            return 1
        return 1 << (num.bit_length() - 1)

    @staticmethod
    def _check_percentage_of_values(diff_to_lower, org_pgnum):
        """True when rounding down would undershoot org_pgnum by more than 25%.

        In that case ceph.com/pgcalc mandates taking the next higher power of 2.
        """
        return float(diff_to_lower) / float(org_pgnum) > NEAREST_POWER_OF_2_PERCENTAGE

    def _rounded_pgnum_to_the_nearest_power_of_2(self, pgnum):
        # Round down by default; round up when the loss exceeds the 25% limit.
        lower_power = self._round_down_to_closest_power_of_2(pgnum)
        if pgnum and self._check_percentage_of_values(pgnum - lower_power, pgnum):
            return self._round_up_to_closest_power_of_2(pgnum)
        return lower_power

    def _calculate_pg_num_formula(self, number_of_pool_osds, pool_percentage):
        # target_pgs * osds_in_pool * pool_share / replicas (ceph.com/pgcalc)
        target_pgs = TARGET_PGS_PER_OSD_UP_TO_DOUBLE_SIZE_INCREASE_EXPECTED
        return target_pgs * number_of_pool_osds * float(pool_percentage) / self._number_of_replicas

    def _select_pgnum_formula_result(self, number_of_pool_osds, pool_percentage):
        # Never go below the configured minimum; round fractions up.
        raw_pgnum = self._calculate_pg_num_formula(number_of_pool_osds, pool_percentage)
        return int(math.ceil(max(raw_pgnum, MINIMUM_PG_NUM)))

    def calculate(self):
        """Return the pool pg_num.

        pgnum = (target_pgs x number_of_osds_in_pool x pool_percentage)/number_of_replicas
        The result is rounded to the nearest acceptable power of 2.
        """
        pgnum = self._select_pgnum_formula_result(
            self._number_of_pool_osds, self._pool_data_percentage)
        return self._rounded_pgnum_to_the_nearest_power_of_2(pgnum)
90
91
NUMBER_OF_POOLS = 4
# Instance-storage backends handled locally; 'rbd' is served by Ceph instead.
SUPPORTED_INSTANCE_BACKENDS = ['default', 'cow', 'lvm']
ALL_DEFAULT_INSTANCE_BACKENDS = SUPPORTED_INSTANCE_BACKENDS + ['rbd']

# Share of the instance volume group given to the instance LV by default.
DEFAULT_INSTANCE_LV_PERCENTAGE = "100"

# openstack-ansible secrets file; source of cinder_ceph_client_uuid.
USER_SECRETS = "/etc/openstack_deploy/user_secrets.yml"

# Ceph PG share percentages for Openstack pools
OSD_POOL_IMAGES_PG_NUM_PERCENTAGE = 0.09
OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE = 0.69
OSD_POOL_VMS_PG_NUM_PERCENTAGE = 0.20
OSD_POOL_SHARED_PG_NUM_PERCENTAGE = 0.02
# Ceph PG share percentages for CaaS pools
OSD_POOL_CAAS_PG_NUM_PERCENTAGE = 1.0

DEFAULT_ROOTDISK_DEVICE = "/dev/sda"
# root disk partition 2 system volume group VG percentages
INSTANCE_NODE_VG_PERCENTAGE = 0.47
NOT_INSTANCE_NODE_VG_PERCENTAGE = 1
"""
/dev/sda1 fixed partition size : 50GiB fixed size = 10% of the total disk size
/dev/sda2 system VG partition size: 47% of remaining total disk size = 42% of total disk size
/dev/sda3 instance partition size 53% of remaining total disk size = 47% of total disk size
"""
117
118
# Jinja2 template: per-host variables wiring every host to an externally
# managed Ceph cluster (cinder/glance/nova clients, mon list, pool names).
# NOTE: the inner "for host in hosts" loop deliberately shadows the outer
# loop variable while building ceph_mons; Jinja2 restores it after endfor.
JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "ext_ceph_user": "{{ ext_ceph_user }}",
        "ext_ceph_user_key": "{{ ext_ceph_user_key }}",
        "cephkeys_access_group": "cephkeys",

        "ceph_mons": [
            {% for host in hosts %}
                "{{ host.name }}"
                {% if not loop.last %},{% endif %}
            {% endfor %}],

        "ext_ceph_fsid": "{{ ext_ceph_fsid }}",
        "ext_ceph_mon_hosts": "{{ ext_ceph_mon_hosts }}",

        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "RBD",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "{{ ext_ceph_user }}",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
        },

        "ext_openstack_pools": [
            "{{ glance_pool_name }}",
            "{{ cinder_pool_name }}",
            "{{ nova_pool_name }}",
            "{{ platform_pool_name }}"
        ],

        "cinder_ceph_client": "{{ ext_ceph_user }}",
        "nova_ceph_client": "{{ ext_ceph_user }}",

        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "glance_ceph_client": "{{ ext_ceph_user }}",
        "ceph_conf": "/etc/ceph/ceph.conf"

    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
176
# Jinja2 template: cinder backend config for controller hosts only; emits an
# rbd or lvm backend depending on the configured openstack_storage value.
# The loopvar dict trick emits a comma before every entry except the first.
JSON_CINDER_BACKENDS_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_controller %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
        "cinder_service_hostname": "{{ host.name }}",
        "cinder_backends": {
            {% if openstack_storage == 'ceph' %}
            "rbd": {
                "volume_driver": "cinder.volume.drivers.rbd.RBDDriver",
                "rbd_pool": "{{ cinder_pool_name }}",
                "rbd_ceph_conf": "/etc/ceph/ceph.conf",
                "ceph_conf": "/etc/ceph/ceph.conf",
                "rbd_flatten_volume_from_snapshot": "false",
                "rbd_max_clone_depth": "5",
                "rbd_store_chunk_size": "4",
                "rados_connect_timeout": "-1",
                "volume_backend_name": "volumes_hdd",
                "rbd_secret_uuid": "{{ cinder_ceph_client_uuid }}",
                "rbd_user": "cinder",
                "backend_host": "controller",
                "rbd_exclusive_cinder_pool": "True"
            }
            {% endif %}
            {% if openstack_storage == 'lvm' %}
            "lvm": {
                "iscsi_ip_address": "{{ installation_controller_ip }}",
                "volume_backend_name": "LVM_iSCSI",
                "volume_driver": "cinder.volume.drivers.lvm.LVMVolumeDriver",
                "volume_group": "cinder-volumes"
            }
            {% endif %}
        }
    }
    {% endif %}
    {% endfor %}
}
"""
218
# Jinja2 template: OSD device list for each host backed by rbd/ceph storage.
JSON_STORAGE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.is_rbd_ceph %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
         "devices": [
             {% for disk in host.ceph_osd_disks %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}{% endfor %}]
    }
    {% endif %}
    {% endfor %}
}
"""
236
# Jinja2 template: by-path disk layout and rootdisk VG sizing for every host.
JSON_STORAGE_HOST_DISK_CONFIGURATION = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "by_path_disks":
             { "os" : "{{ host.os_disk }}",
               "osd" : "{{ host.ceph_osd_disks }}",
               "osd_disks_ids" : "{{ host.osd_disks_ids }}"
             },
         "rootdisk_vg_percentage": "{{ host.vg_percentage }}",
         "default_rootdisk_device": "{{ rootdisk_device }}"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
252
253
# Jinja2 template: cinder LVM disks and physical volumes for every host.
JSON_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "devices": [
             {% for disk in host.cinder_disks %}
             "{{disk}}"
             {%if not loop.last %},{% endif %}{% endfor %}],
         "cinder_physical_volumes": [
             {% for disk in host.cinder_physical_volumes %}
             "{{disk}}"
             {%if not loop.last %},{% endif %}{% endfor %}]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
270
271
# Jinja2 template: bare_lvm backend (disks, PVs, mount point and LV name);
# hosts without the bare_lvm flag get an empty object.
JSON_BARE_LVM_STORAGE_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        {% if host.is_bare_lvm %}
        "bare_lvm": {
            "disks": [
                {% for disk in host.bare_lvm_disks %}
                    "{{disk}}"
                    {%if not loop.last %},{% endif %}{% endfor %}],
            "physical_volumes": [
                {% for disk in host.bare_lvm_physical_volumes %}
                    "{{disk}}"
                    {%if not loop.last %},{% endif %}{% endfor %}],
            "mount_options": "{{ host.mount_options }}",
            "mount_dir": "{{ host.mount_dir }}",
            "name": "{{ host.bare_lvm_lv_name }}"
        }
        {% endif %}
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
295
# Jinja2 template: nova instance-storage disks/PVs for hosts that have any.
JSON_DEVICE_HOST_VAR = """
{
    {%- set loopvar = {'first_entry': True} %}
    {% for host in hosts %}
    {% if host.instance_physical_volumes %}
    {%- if not loopvar.first_entry %},{%- endif %}
    {%- if loopvar.update({'first_entry': False}) %}{%- endif %}
    "{{ host.name }}": {
         "instance_disks": [
             {% for disk in host.instance_disks %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}
             {% endfor %}],
         "instance_physical_volumes": [
             {% for disk in host.instance_physical_volumes %}
                 "{{disk}}"
                 {%if not loop.last %},{% endif %}
             {% endfor %}],
         "instance_lv_percentage": "{{ host.instance_lv_percentage }}"
    }
    {% endif %}
    {% endfor %}
}
"""
320
# /etc/ansible/roles/os_nova/templates/nova.conf.j2
# Jinja2 template: points nova's libvirt image backend at the Ceph vms pool.
JSON_NOVA_RBD_HOST_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "nova_libvirt_images_rbd_pool": "{{ nova_pool_name }}",
         "nova_ceph_client": "{{ nova_ceph_client }}"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
332
333
#
# /opt/ceph-ansible/group_vars/osds.yml
# ceph.conf overrides for the generic deployment shape.
JSON_OVERRIDE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_min_down_reporters": "1",
            "mon_osd_adjust_heartbeat_grace": "false",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""
# Variant with a bluestore cache limit; drops mon_osd_min_down_reporters.
JSON_OVERRIDE_CACHE = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""
# Variant for the collocated 3-controller setup: adds tighter mon lease and
# election timing on top of the cache variant.
JSON_OVERRIDE_3CONTROLLERS = """
{
    "ceph_conf_overrides": {
        "global": {
            "mon_max_pg_per_osd": "400",
            "mon_pg_warn_max_object_skew": "-1",
            "osd_pool_default_size": "{{ osd_pool_default_size }}",
            "osd_pool_default_min_size": "{{ osd_pool_default_min_size }}",
            "osd_pool_default_pg_num": "{{ osd_pool_default_pg_num }}",
            "osd_pool_default_pgp_num": "{{ osd_pool_default_pg_num }}",
            "osd_heartbeat_grace": "3",
            "osd_heartbeat_interval": "2",
            "mon_osd_adjust_heartbeat_grace": "false",
            "bluestore_cache_size": "1073741824",
            "auth_client_required": "cephx"
        },
        "mgr": {
            "mgr_modules": "dashboard"
        },
        "mon": {
            "mon_health_preluminous_compat_warning": "false",
            "mon_health_preluminous_compat": "true",
            "mon_lease": "1.0",
            "mon_election_timeout": "2",
            "mon_lease_renew_interval_factor": "0.4",
            "mon_lease_ack_timeout_factor": "1.5",
            "mon_timecheck_interval": "60",
            "mon_sd_reporter_subtree_level": "device",
            "mon_clock_drift_allowed": "0.1"
        },
        "osd": {
            "osd_mon_heartbeat_interval": "10",
            "osd_mon_report_interval_min": "1",
            "osd_mon_report_interval_max": "15"
        }
    }
}
"""
442
# Jinja2 template: ceph public/cluster network CIDRs.
JSON_NETWORK = """
{
    "public_network": "{{ public_networks }}",
    "cluster_network": "{{ cluster_networks }}"
}
"""

# Kernel tuning applied on ceph nodes.
JSON_OS_TUNING = """
{
    "os_tuning_params": [{
        "name": "vm.min_free_kbytes",
        "value": "1048576"
    }]
}
"""

# Per-pool pg_num values; the caas entry is emitted only when its pg_num > 0.
JSON_OSD_POOL_PGNUMS = """
{
    "osd_pool_images_pg_num": "{{ osd_pool_images_pg_num }}",
    "osd_pool_volumes_pg_num": "{{ osd_pool_volumes_pg_num }}",
    "osd_pool_vms_pg_num": "{{ osd_pool_vms_pg_num }}",
    "osd_pool_shared_pg_num": "{{ osd_pool_shared_pg_num }}"{%- if 0 < osd_pool_caas_pg_num %},
    "osd_pool_caas_pg_num": "{{ osd_pool_caas_pg_num }}"
{% endif %}
}
"""

# Ceph role membership lists (mon/osd/mgr), duplicated under the aliases the
# various playbooks expect.
JSON_CEPH_HOSTS = """
{
    "ceph-mon": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mon_hosts": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph_mons": [ {% for host in mons %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-osd_hosts": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "osds": [ {% for host in osds %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "mgrs": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ],
    "ceph-mgr": [ {% for host in mgrs %}"{{ host.name }}"{% if not loop.last %},{% endif %}{% endfor %} ]
}
"""
#    "storage_backend": ceph
484
485
# Replaces variables in /opt/openstack-ansible/playbooks/inventory/group_vars/glance_all.yml
# Glance settings when images live in the Ceph images pool.
JSON_GLANCE_CEPH_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "rbd",
        "glance_additional_stores": ["http", "cinder", "file"],
        "glance_rbd_store_pool": "{{ glance_pool_name }}",
        "glance_rbd_store_chunk_size": "8",
        "ceph_conf": "/etc/ceph/ceph.conf"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""

# Glance settings for the LVM backend: plain file store.
JSON_GLANCE_LVM_ALL_GROUP_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
        "glance_default_store": "file"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
510
# ceph-ansible variables must be set at host_vars -level
# ceph-ansible sample variables in group_vars
# group_vars - all.yml.sample
JSON_CEPH_ANSIBLE_ALL_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "mon_group_name": "mons",
         "osd_group_name": "osds",
         "mgr_group_name": "mgrs",
         "ceph_stable_release": "luminous",
         "generate_fsid": "true",
         "cephx": "true",
         "journal_size": "10240",
         "osd_objectstore": "bluestore"
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
530
# pylint: disable=line-too-long
# ceph-ansible
# group_vars - mons.yml.sample
# Jinja2 template: mon host vars — the OpenStack/CaaS pool definitions and the
# cephx keys/capabilities generated for each client. OpenStack and caas
# sections are emitted only for the matching deployment type.
JSON_CEPH_ANSIBLE_MONS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "monitor_secret": "{{ '{{ monitor_keyring.stdout }}' }}",
         "openstack_config": true,
         "cephkeys_access_group": "cephkeys",
         "openstack_pools": [
             {
                 "name": "{{ platform_pool }}",
                 "pg_num": "{{ osd_pool_shared_pg_num }}",
                 "rule_name": ""
             }{% if is_openstack_deployment %},
             {
                 "name": "{{ glance_pool }}",
                 "pg_num": "{{ osd_pool_images_pg_num }}",
                 "rule_name": ""
             },
             {
                 "name": "{{ cinder_pool }}",
                 "pg_num": "{{ osd_pool_volumes_pg_num }}",
                 "rule_name": ""
             },
             {
                 "name": "{{ nova_pool }}",
                 "pg_num": "{{ osd_pool_vms_pg_num }}",
                 "rule_name": ""
             }
        {%- endif %}
        {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
             {
                 "name": "caas",
                 "pg_num": "{{ osd_pool_caas_pg_num }}",
                 "rule_name": ""
             }
        {%- endif %}
         ],
         "openstack_keys": [
             {
                 "acls": [],
                 "key": "{{ ceph_keys['client.shared'] }}",
                 "mode": "0600",
                 "mon_cap": "allow r",
                 "name": "client.shared",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ platform_pool }}"
             }{% if is_openstack_deployment %},
             {
                 "acls": [],
                 "key": "{{ ceph_keys['client.glance'] }}",
                 "mode": "0640",
                 "mon_cap": "allow r",
                 "name": "client.glance",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ glance_pool }}"
             },
             {
                 "acls": [],
                 "key": "{{ ceph_keys['client.cinder'] }}",
                 "mode": "0640",
                 "mon_cap": "allow r, allow command \\\\\\\\\\\\\\"osd blacklist\\\\\\\\\\\\\\"",
                 "name": "client.cinder",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool={{ cinder_pool }}, allow rwx pool={{ nova_pool }}, allow rx pool={{ glance_pool }}"
             }
        {%- endif %}
        {%- if is_caas_deployment and 0 < osd_pool_caas_pg_num %},
             {
                 "acls": [],
                 "key": "{{ ceph_keys['client.caas'] }}",
                 "mode": "0600",
                 "mon_cap": "allow r",
                 "name": "client.caas",
                 "osd_cap": "allow class-read object_prefix rbd_children, allow rwx pool=caas"
             }
        {%- endif %}
        ]
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
# pylint: enable=line-too-long
613
# ceph-ansible
# group_vars - osds.yml.sample
# OSD layout: collocated journal (no dedicated journal devices).
JSON_CEPH_ANSIBLE_OSDS_HOST_VARS = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "raw_journal_devices": [],
         "journal_collocation": true,
         "raw_multi_journal": false,
         "dmcrytpt_journal_collocation": false,
         "dmcrypt_dedicated_journal": false,
         "osd_scenario": "collocated",
         "dedicated_devices": []
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
631
632
# Flags every listed host as living in a single-controller deployment.
JSON_SINGLE_CONTROLLER_VAR = """
{
    {% for host in hosts %}
    "{{ host.name }}": {
         "single_controller_host": true
    } {% if not loop.last %},{% endif %}
    {% endfor %}
}
"""
642
643
class Host(object):
    """Mutable record describing one inventory host's storage roles and disks."""

    def __init__(self):
        # Identity and service roles
        self.name = None
        self.is_controller = False
        self.is_compute = False
        self.is_storage = False
        self.is_lvm = None
        self.is_osd = None
        self.is_mon = None
        self.is_mgr = None
        self.is_rbd_ceph = None
        self.is_bare_lvm = None
        # Ceph OSD disk layout
        self.ceph_osd_disks = []
        self.osd_disks_ids = []
        self.os_disk = ""
        # LVM / cinder disk layout
        self.lvm_disks = []
        self.cinder_disks = []
        self.cinder_physical_volumes = []
        # Nova instance storage
        self.instance_disks = []
        self.instance_physical_volumes = []
        self.instance_lv_percentage = ""
        self.vg_percentage = NOT_INSTANCE_NODE_VG_PERCENTAGE
        # bare_lvm backend
        self.bare_lvm_disks = None
        self.bare_lvm_physical_volumes = None
        self.mount_dir = ""
        self.mount_options = None
        self.bare_lvm_lv_name = None
671
672
673 class storageinventory(cmansibleinventoryconfig.CMAnsibleInventoryConfigPlugin):
674
    def __init__(self, confman, inventory, ownhost):
        """Cache the config handlers and prepare the host bucket lists.

        The buckets (storage/compute/controller/mon/osd/mgr) are filled later
        by the handler methods; here they start empty.
        """
        super(storageinventory, self).__init__(confman, inventory, ownhost)
        self.hosts = []
        self.storage_hosts = []
        self.compute_hosts = []
        self.controller_hosts = []
        self._mon_hosts = []
        self._osd_hosts = []
        self._mgr_hosts = []
        self.single_node_config = False
        self._networking_config_handler = self.confman.get_networking_config_handler()
        self._hosts_config_handler = self.confman.get_hosts_config_handler()
        self._storage_config_handler = self.confman.get_storage_config_handler()
        self._openstack_config_handler = self.confman.get_openstack_config_handler()
        self._sp_config_handler = self.confman.get_storage_profiles_config_handler()
        self._caas_config_handler = self.confman.get_caas_config_handler()
        # PG share split between the CaaS and OpenStack Ceph pools
        self._ceph_caas_pg_proportion = 0.0
        self._ceph_openstack_pg_proportion = 0.0
        self._ceph_keys_dict = None
        # Ceph pool names used throughout the generated inventory
        self._cinder_pool_name = 'volumes'
        self._glance_pool_name = 'images'
        self._nova_pool_name = 'vms'
        self._platform_pool_name = 'shared'
        # Maps a storage-profile attribute name to the backends it applies to
        # and the config-handler getter that retrieves its value.
        self._storage_profile_attribute_properties = {
            'lvm_cinder_storage_partitions': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_cinder_storage_partitions
            },
            'mount_options': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_options
            },
            'mount_dir': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_mount_dir
            },
            'lv_name': {
                'backends': ['bare_lvm'],
                'getter': self._sp_config_handler.get_profile_bare_lvm_lv_name
            },
            'nr_of_ceph_osd_disks': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_nr_of_ceph_osd_disks
            },
            'lvm_instance_storage_partitions': {
                'backends': ['lvm', 'bare_lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_storage_partitions
            },
            'lvm_instance_cow_lv_storage_percentage': {
                'backends': ['lvm'],
                'getter': self._sp_config_handler.get_profile_lvm_instance_cow_lv_storage_percentage
            },
            'openstack_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_openstack_pg_proportion
            },
            'caas_pg_proportion': {
                'backends': ['ceph'],
                'getter': self._sp_config_handler.get_profile_ceph_caas_pg_proportion
            },
        }
736
737     def _is_host_managment(self, host):
738         return self._is_profile_in_hosts_profiles(profiles.Profiles.get_management_service_profile(), host)
739
740     def _is_host_controller(self, host):
741         return self._is_profile_in_hosts_profiles(profiles.Profiles.get_controller_service_profile(), host)
742
743     def _is_profile_in_hosts_profiles(self, profile, host):
744         node_service_profiles = self._hosts_config_handler.get_service_profiles(host)
745         return profile in node_service_profiles
746
747     def _is_host_compute(self, host):
748         return self._is_profile_in_hosts_profiles(profiles.Profiles.get_compute_service_profile(), host)
749
750     def _is_host_caas_master(self, host):
751         return self._is_profile_in_hosts_profiles(profiles.Profiles.get_caasmaster_service_profile(), host)
752
753     def _is_host_storage(self, host):
754         return self._is_profile_in_hosts_profiles(profiles.Profiles.get_storage_service_profile(), host)
755
756     def _is_controller_has_compute(self):
757         if set.intersection(set(self.compute_hosts), set(self.controller_hosts)):
758             return True
759         return False
760
761     def _is_collocated_controller_node_config(self):
762         if set.intersection(set(self.storage_hosts), set(self.controller_hosts)):
763             return True
764         return False
765
766     def _is_collocated_3controllers_config(self):
767         if (self._is_collocated_controller_node_config() and
768                 (len(self.controller_hosts) == 3) and (len(self.hosts) == 3)):
769             return True
770         return False
771
772     def _is_dedicated_storage_config(self):
773         collocated_config = set.intersection(set(self.storage_hosts), set(self.controller_hosts))
774         if collocated_config and (collocated_config == set(self.controller_hosts)):
775             return False
776         elif self.storage_hosts:
777             return True
778         else:
779             return False
780
    def handle_bootstrapping(self):
        # Phase hook: delegate to the shared handle() with the phase name.
        self.handle('bootstrapping')
783
    def handle_provisioning(self):
        # Phase hook: delegate to the shared handle() with the phase name.
        self.handle('provisioning')
786
    def handle_postconfig(self):
        # Phase hook: delegate to the shared handle() with the phase name.
        self.handle('postconfig')
789
    def handle_setup(self):
        # No storage-specific work is needed during the setup phase.
        pass
792
793     def _template_and_add_vars_to_hosts(self, template, **variables):
794         try:
795             text = Environment().from_string(template).render(variables)
796             if text:
797                 self._add_vars_for_hosts(text)
798         except Exception as exp:
799             raise cmerror.CMError(str(exp))
800
801     def _add_vars_for_hosts(self, inventory_text):
802         inventory = json.loads(inventory_text)
803         for host in inventory.keys():
804             for var, value in inventory[host].iteritems():
805                 self.add_host_var(host, var, value)
806
807     @staticmethod
808     def _read_cinder_ceph_client_uuid():
809         if os.path.isfile(USER_SECRETS):
810             d = dict(line.split(':', 1) for line in open(USER_SECRETS))
811             cinder_ceph_client_uuid = d['cinder_ceph_client_uuid'].strip()
812             return cinder_ceph_client_uuid
813         else:
814             raise cmerror.CMError("The file {} does not exist.".format(USER_SECRETS))
815
816     def _add_cinder_backends(self):
817         self._template_and_add_vars_to_hosts(
818             JSON_CINDER_BACKENDS_HOST_VAR,
819             hosts=self.controller_hosts,
820             installation_controller_ip=self._installation_host_ip,
821             cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
822             openstack_storage=self._openstack_config_handler.get_storage_backend(),
823             cinder_pool_name=self._cinder_pool_name)
824
825     def _add_external_ceph_cinder_backends(self):
826         handler = self._storage_config_handler
827         self._template_and_add_vars_to_hosts(
828             JSON_EXTERNAL_CEPH_CINDER_BACKEND_HOST_VAR,
829             hosts=self.hosts,
830             cinder_ceph_client_uuid=self._read_cinder_ceph_client_uuid(),
831             ext_ceph_user=handler.get_ext_ceph_ceph_user(),
832             ext_ceph_user_key=handler.get_ext_ceph_ceph_user_key(),
833             ext_ceph_fsid=handler.get_ext_ceph_fsid(),
834             ext_ceph_mon_hosts=", ".join(handler.get_ext_ceph_mon_hosts()),
835             nova_pool_name=self._nova_pool_name,
836             glance_pool_name=self._glance_pool_name,
837             cinder_pool_name=self._cinder_pool_name,
838             platform_pool_name=self._platform_pool_name)
839
840     def _add_storage_nodes_configs(self):
841         rbdhosts = []
842         for host in self.hosts:
843             if host.is_rbd_ceph:
844                 rbdhosts.append(host)
845         self._template_and_add_vars_to_hosts(JSON_STORAGE_HOST_VAR, hosts=rbdhosts)
846
847     def _add_hdd_storage_configs(self):
848         self._template_and_add_vars_to_hosts(
849             JSON_STORAGE_HOST_DISK_CONFIGURATION,
850             hosts=self.hosts,
851             rootdisk_device=DEFAULT_ROOTDISK_DEVICE)
852
853     def _add_lvm_storage_configs(self):
854         self._template_and_add_vars_to_hosts(JSON_LVM_STORAGE_HOST_VAR, hosts=self.hosts)
855
856     def _add_bare_lvm_storage_configs(self):
857         self._template_and_add_vars_to_hosts(JSON_BARE_LVM_STORAGE_HOST_VAR, hosts=self.hosts)
858
859     def _add_instance_devices(self):
860         self._template_and_add_vars_to_hosts(JSON_DEVICE_HOST_VAR, hosts=self.compute_hosts)
861
862     def _add_ceph_hosts(self):
863         self._add_host_group(
864             Environment().from_string(JSON_CEPH_HOSTS).render(
865                 mons=self._mon_hosts,
866                 osds=self._osd_hosts,
867                 mgrs=self._mgr_hosts))
868
869         self._add_global_parameters(
870             Environment().from_string(JSON_CEPH_HOSTS).render(
871                 mons=self._mon_hosts,
872                 osds=self._osd_hosts,
873                 mgrs=self._mgr_hosts))
874
875     def _add_glance(self):
876         if self.is_ceph_backend:
877             self._template_and_add_vars_to_hosts(
878                 JSON_GLANCE_CEPH_ALL_GROUP_VARS,
879                 hosts=self.hosts,
880                 glance_pool_name=self._glance_pool_name)
881         elif self.is_lvm_backend:
882             self._template_and_add_vars_to_hosts(JSON_GLANCE_LVM_ALL_GROUP_VARS, hosts=self.hosts)
883
884     def _add_ceph_ansible_all_sample_host_vars(self):
885         self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_ALL_HOST_VARS, hosts=self.hosts)
886
887     def _add_ceph_ansible_mons_sample_host_vars(self):
888         self._template_and_add_vars_to_hosts(
889             JSON_CEPH_ANSIBLE_MONS_HOST_VARS,
890             hosts=self.hosts,
891             **self._get_ceph_vars())
892
893     @property
894     def _ceph_keys(self):
895         if not self._ceph_keys_dict:
896             try:
897                 self._ceph_keys_dict = {
898                     'client.shared': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
899                     'client.glance': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
900                     'client.cinder': subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip(),
901                     'client.caas':   subprocess.check_output(["ceph-authtool", "--gen-print-key"]).strip()
902                 }
903             except Exception as exp:
904                 raise cmerror.CMError(str(exp))
905
906         return self._ceph_keys_dict
907
908     def _get_ceph_vars(self):
909         return {
910             'osd_pool_images_pg_num':  self._calculated_images_pg_num,
911             'osd_pool_volumes_pg_num': self._calculated_volumes_pg_num,
912             'osd_pool_vms_pg_num':     self._calculated_vms_pg_num,
913             'osd_pool_shared_pg_num':  self._calculated_shared_pg_num,
914             'osd_pool_caas_pg_num':    self._calculated_caas_pg_num,
915             'is_openstack_deployment': self._is_openstack_deployment,
916             'is_caas_deployment':      self._is_caas_deployment,
917             'is_hybrid_deployment':    self._is_hybrid_deployment,
918             'nova_pool':               self._nova_pool_name,
919             'glance_pool':             self._glance_pool_name,
920             'cinder_pool':             self._cinder_pool_name,
921             'platform_pool':           self._platform_pool_name,
922             'ceph_keys':               self._ceph_keys
923         }
924
925     def _add_ceph_ansible_osds_sample_host_vars(self):
926         self._template_and_add_vars_to_hosts(JSON_CEPH_ANSIBLE_OSDS_HOST_VARS, hosts=self.hosts)
927
928     def _add_nova(self):
929         if self.is_external_ceph_backend:
930             nova_ceph_client = self._storage_config_handler.get_ext_ceph_ceph_user()
931         else:
932             nova_ceph_client = 'cinder'
933
934         self._template_and_add_vars_to_hosts(
935             JSON_NOVA_RBD_HOST_VAR, hosts=self.compute_hosts,
936             nova_pool_name=self._nova_pool_name,
937             nova_ceph_client=nova_ceph_client)
938
939     def _add_single_controller_host_var(self):
940         self._template_and_add_vars_to_hosts(
941             JSON_SINGLE_CONTROLLER_VAR, hosts=self.controller_hosts)
942
943     def _add_global_parameters(self, text):
944         try:
945             inventory = json.loads(text)
946             for var, value in inventory.iteritems():
947                 self.add_global_var(var, value)
948         except Exception as exp:
949             raise cmerror.CMError(str(exp))
950
951     def _add_host_group(self, text):
952         try:
953             inventory = json.loads(text)
954             for var, value in inventory.iteritems():
955                 self.add_host_group(var, value)
956         except Exception as exp:
957             raise cmerror.CMError(str(exp))
958
959     @property
960     def cluster_network_cidrs(self):
961         cidrs = []
962         network = self._networking_config_handler.get_infra_storage_cluster_network_name()
963         for domain in self._networking_config_handler.get_network_domains(network):
964             cidrs.append(self._networking_config_handler.get_network_cidr(network, domain))
965         return ','.join(cidrs)
966
967     @property
968     def public_network_cidrs(self):
969         cidrs = set()
970         cluster_network = self._networking_config_handler.get_infra_storage_cluster_network_name()
971         public_network = self._networking_config_handler.get_infra_internal_network_name()
972         for domain in self._networking_config_handler.get_network_domains(cluster_network):
973             cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
974         for host in self._mon_hosts:
975             domain = self._hosts_config_handler.get_host_network_domain(host.name)
976             cidrs.add(self._networking_config_handler.get_network_cidr(public_network, domain))
977         return ','.join(cidrs)
978
979     def _add_networks(self):
980         self._add_global_parameters(
981             Environment().from_string(JSON_NETWORK).render(
982                 public_networks=self.public_network_cidrs,
983                 cluster_networks=self.cluster_network_cidrs))
984
985     def _add_monitor_address(self):
986         infra_storage_network = self._networking_config_handler.get_infra_internal_network_name()
987         for host in self._mon_hosts:
988             monitor_address = \
989                 self._networking_config_handler.get_host_ip(host.name, infra_storage_network)
990             self.add_host_var(host.name, "monitor_address", monitor_address)
991
992     def _add_override_settings(self):
993         ceph_osd_pool_size = self._storage_config_handler.get_ceph_osd_pool_size()
994
995         if self._is_collocated_3controllers_config():
996             self._add_global_parameters(
997                 Environment().from_string(JSON_OVERRIDE_3CONTROLLERS).render(
998                     osd_pool_default_size=ceph_osd_pool_size,
999                     osd_pool_default_min_size=str(ceph_osd_pool_size-1),
1000                     osd_pool_default_pg_num=self._calculated_default_pg_num))
1001
1002             self._add_global_parameters(
1003                 Environment().from_string(JSON_OS_TUNING).render())
1004
1005         elif self._is_controller_has_compute():
1006             self._add_global_parameters(
1007                 Environment().from_string(JSON_OVERRIDE_CACHE).render(
1008                     osd_pool_default_size=ceph_osd_pool_size,
1009                     osd_pool_default_min_size=str(ceph_osd_pool_size-1),
1010                     osd_pool_default_pg_num=self._calculated_default_pg_num))
1011
1012             self._add_global_parameters(
1013                 Environment().from_string(JSON_OS_TUNING).render())
1014         else:
1015             self._add_global_parameters(
1016                 Environment().from_string(JSON_OVERRIDE).render(
1017                     osd_pool_default_size=ceph_osd_pool_size,
1018                     osd_pool_default_min_size=str(ceph_osd_pool_size-1),
1019                     osd_pool_default_pg_num=self._calculated_default_pg_num))
1020
1021     def _calculate_pg_num(self, pool_data_percentage):
1022         pgnum = PGNum(self._total_number_of_osds,
1023                       pool_data_percentage,
1024                       self._number_of_replicas)
1025         return pgnum.calculate()
1026
1027     @property
1028     def _calculated_default_pg_num(self):
1029         return self._calculate_pg_num(self._pool_data_percentage)
1030
1031     @property
1032     def _calculated_volumes_pg_num(self):
1033         return self._calculate_pg_num(
1034             OSD_POOL_VOLUMES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)
1035
1036     @property
1037     def _calculated_images_pg_num(self):
1038         return self._calculate_pg_num(
1039             OSD_POOL_IMAGES_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)
1040
1041     @property
1042     def _calculated_vms_pg_num(self):
1043         return self._calculate_pg_num(
1044             OSD_POOL_VMS_PG_NUM_PERCENTAGE * self._ceph_openstack_pg_proportion)
1045
1046     @property
1047     def _calculated_shared_pg_num(self):
1048         return self._calculate_pg_num(
1049             OSD_POOL_SHARED_PG_NUM_PERCENTAGE)
1050
1051     @property
1052     def _calculated_caas_pg_num(self):
1053         if self._ceph_caas_pg_proportion > 0:
1054             return self._calculate_pg_num(
1055                 (OSD_POOL_CAAS_PG_NUM_PERCENTAGE - OSD_POOL_SHARED_PG_NUM_PERCENTAGE) *
1056                 self._ceph_caas_pg_proportion)
1057         return 0
1058
1059     def _add_osd_pool_pg_nums(self):
1060         self._add_global_parameters(
1061             Environment().from_string(JSON_OSD_POOL_PGNUMS).render(**self._get_ceph_vars()))
1062
1063     @property
1064     def _installation_host(self):
1065         return self._hosts_config_handler.get_installation_host()
1066
1067     @property
1068     def _infra_internal_network_name(self):
1069         return self._networking_config_handler.get_infra_internal_network_name()
1070
1071     @property
1072     def _installation_host_ip(self):
1073         return self._networking_config_handler.get_host_ip(
1074             self._installation_host, self._infra_internal_network_name)
1075
1076     @property
1077     def is_ceph_backend(self):
1078         return self._storage_config_handler.is_ceph_enabled()
1079
1080     @property
1081     def is_external_ceph_backend(self):
1082         return (self._storage_config_handler.is_external_ceph_enabled() and
1083                 self._ceph_is_openstack_storage_backend)
1084
1085     def _set_external_ceph_pool_names(self):
1086         if self.is_external_ceph_backend:
1087             h = self._storage_config_handler
1088             self._nova_pool_name = h.get_ext_ceph_nova_pool()
1089             self._cinder_pool_name = h.get_ext_ceph_cinder_pool()
1090             self._glance_pool_name = h.get_ext_ceph_glance_pool()
1091             self._platform_pool_name = h.get_ext_ceph_platform_pool()
1092
1093     @property
1094     def _lvm_is_openstack_storage_backend(self):
1095         return True if self._openstack_config_handler.get_storage_backend() == 'lvm' else False
1096
1097     @property
1098     def _ceph_is_openstack_storage_backend(self):
1099         return True if self._openstack_config_handler.get_storage_backend() == 'ceph' else False
1100
1101     @property
1102     def is_lvm_backend(self):
1103         return (self._storage_config_handler.is_lvm_enabled() and
1104                 self._lvm_is_openstack_storage_backend)
1105
1106     @property
1107     def instance_default_backend(self):
1108         return self._openstack_config_handler.get_instance_default_backend()
1109
1110     @property
1111     def _hosts_with_ceph_storage_profile(self):
1112         # return filter(lambda host: host.is_rbd, self.hosts)
1113         return [host for host in self.hosts if host.is_rbd_ceph]
1114
1115     @property
1116     def _is_openstack_deployment(self):
1117         return self._caas_config_handler.is_openstack_deployment()
1118
1119     @property
1120     def _is_caas_deployment(self):
1121         return self._caas_config_handler.is_caas_deployment()
1122
1123     @property
1124     def _is_hybrid_deployment(self):
1125         return self._caas_config_handler.is_hybrid_deployment()
1126
1127     def handle(self, phase):
1128         self._init_jinja_environment()
1129         self.add_global_var("external_ceph_configured", self.is_external_ceph_backend)
1130         self.add_global_var("ceph_configured", self.is_ceph_backend)
1131         self.add_global_var("lvm_configured", self.is_lvm_backend)
1132         if phase == 'bootstrapping':
1133             self._add_hdd_storage_configs()
1134         else:
1135             self._add_hdd_storage_configs()
1136             if self.is_external_ceph_backend:
1137                 self._set_external_ceph_pool_names()
1138                 self._add_external_ceph_cinder_backends()
1139             else:
1140                 if self._is_openstack_deployment:
1141                     self._add_cinder_backends()
1142                     self._add_glance()
1143
1144             ceph_hosts = self._hosts_with_ceph_storage_profile
1145             if ceph_hosts:
1146                 self._set_ceph_pg_proportions(ceph_hosts)
1147                 self._add_ceph_ansible_all_sample_host_vars()
1148                 self._add_ceph_ansible_mons_sample_host_vars()
1149                 self._add_ceph_ansible_osds_sample_host_vars()
1150                 self._add_ceph_hosts()
1151                 self._add_storage_nodes_configs()
1152                 self._add_monitor_address()
1153                 self._add_override_settings()
1154                 self._add_osd_pool_pg_nums()
1155                 self._add_networks()
1156                 self.add_global_var("cinder_ceph_client_uuid", self._read_cinder_ceph_client_uuid())
1157             if self.is_lvm_backend:
1158                 self._add_lvm_storage_configs()
1159             self._add_bare_lvm_storage_configs()
1160
1161             self.add_global_var("instance_default_backend", self.instance_default_backend)
1162             self.add_global_var("storage_single_node_config", self.single_node_config)
1163             self.add_global_var("one_controller_node_config", self._is_one_controller_node_config)
1164             if self._is_one_controller_node_config:
1165                 self._add_single_controller_host_var()
1166             self.add_global_var("collocated_controller_node_config",
1167                                 self._is_collocated_controller_node_config())
1168             self.add_global_var("dedicated_storage_node_config",
1169                                 self._is_dedicated_storage_config())
1170             self.add_global_var("storage_one_controller_multi_nodes_config",
1171                                 self._is_one_controller_multi_nodes_config)
1172             if self.instance_default_backend == 'rbd':
1173                 self._add_nova()
1174             elif self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
1175                 self._add_instance_devices()
1176
1177     def _set_ceph_pg_proportions(self, ceph_hosts):
1178         # FIXME: First storage host's storage profile assumed to get pg proportion values
1179         hostname = ceph_hosts[0].name
1180         if self._is_hybrid_deployment:
1181             self._ceph_openstack_pg_proportion = self._get_ceph_openstack_pg_proportion(hostname)
1182             self._ceph_caas_pg_proportion = self._get_ceph_caas_pg_proportion(hostname)
1183         elif self._is_openstack_deployment:
1184             self._ceph_openstack_pg_proportion = 1.0
1185             self._ceph_caas_pg_proportion = 0.0
1186         elif self._is_caas_deployment:
1187             self._ceph_openstack_pg_proportion = 0.0
1188             self._ceph_caas_pg_proportion = 1.0
1189
1190     def _init_host_data(self):
1191         hosts = self._hosts_config_handler.get_enabled_hosts()
1192         self.single_node_config = True if len(hosts) == 1 else False
1193         for name in hosts:
1194             host = self._initialize_host_object(name)
1195             self.hosts.append(host)
1196             if host.is_osd:
1197                 self._osd_hosts.append(host)
1198             if host.is_mon:
1199                 self._mon_hosts.append(host)
1200             if host.is_mgr:
1201                 self._mgr_hosts.append(host)
1202
1203         for host in self.hosts:
1204             if host.is_compute:
1205                 self.compute_hosts.append(host)
1206             if host.is_controller:
1207                 self.controller_hosts.append(host)
1208             if host.is_storage:
1209                 self.storage_hosts.append(host)
1210
1211     @property
1212     def _number_of_osd_hosts(self):
1213         return len(self._osd_hosts)
1214
1215     @property
1216     def _is_one_controller_multi_nodes_config(self):
1217         if len(self.controller_hosts) == 1 and not self.single_node_config:
1218             return True
1219         return False
1220
1221     @property
1222     def _is_one_controller_node_config(self):
1223         if len(self.controller_hosts) == 1:
1224             return True
1225         return False
1226
1227     @property
1228     def _number_of_osds_per_host(self):
1229         first_osd_host = self._osd_hosts[0].name
1230         return self._get_nr_of_ceph_osd_disks(first_osd_host)
1231
1232     @property
1233     def _total_number_of_osds(self):
1234         return self._number_of_osds_per_host * self._number_of_osd_hosts
1235
1236     @property
1237     def _number_of_pools(self):
1238         """TODO: Get dynamically"""
1239         return NUMBER_OF_POOLS
1240
1241     @property
1242     def _pool_data_percentage(self):
1243         return float(1.0 / self._number_of_pools)
1244
1245     @property
1246     def _number_of_replicas(self):
1247         num = self._storage_config_handler.get_ceph_osd_pool_size()
1248         return 2 if num == 0 else num
1249
1250     def _init_jinja_environment(self):
1251         self._init_host_data()
1252
1253     def _is_backend_configured(self, backend, host_name):
1254         try:
1255             if self._get_storage_profile_for_backend(host_name, backend):
1256                 return True
1257             return False
1258         except configerror.ConfigError:
1259             return False
1260
1261     def _get_storage_profile_for_backend(self, host_name, *backends):
1262         storage_profiles = self._hosts_config_handler.get_storage_profiles(host_name)
1263         sp_handler = self._sp_config_handler
1264         for storage_profile in storage_profiles:
1265             if sp_handler.get_profile_backend(storage_profile) in backends:
1266                 return storage_profile
1267         return None
1268
1269     def _get_nr_of_ceph_osd_disks(self, host_name):
1270         return self._get_storage_profile_attribute(host_name, 'nr_of_ceph_osd_disks')
1271
1272     def _get_storage_profile_attribute(self, host_name, attribute):
1273         attribute_properties = self._storage_profile_attribute_properties[attribute]
1274         storage_profile = self._get_storage_profile_for_backend(host_name,
1275                                                                 *attribute_properties['backends'])
1276         if storage_profile:
1277             return attribute_properties['getter'](storage_profile)
1278         raise cmerror.CMError(str("Failed to get %s" % attribute))
1279
1280     def _get_ceph_openstack_pg_proportion(self, host_name):
1281         return self._get_storage_profile_attribute(host_name, 'openstack_pg_proportion')
1282
1283     def _get_ceph_caas_pg_proportion(self, host_name):
1284         return self._get_storage_profile_attribute(host_name, 'caas_pg_proportion')
1285
    def _get_lvm_instance_storage_partitions(self, host_name):
        """Return the LVM instance-storage partition list for the host.

        Returns [] when the backend needs no partitions; raises CMError
        for an unknown instance_default_backend.
        """
        try:
            # Only backends listed in SUPPORTED_INSTANCE_BACKENDS keep
            # instances on LVM partitions.
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_storage_partitions')
        except configerror.ConfigError:
            # The profile may legitimately lack the attribute; fall
            # through to the backend validity check below.
            pass

        if self.instance_default_backend not in ALL_DEFAULT_INSTANCE_BACKENDS:
            raise cmerror.CMError(
                str("Unknown instance_default_backend %s "
                    "not supported" % self.instance_default_backend))
        return []
1299
1300     def _get_lvm_cinder_storage_partitions(self, host_name):
1301         return self._get_storage_profile_attribute(host_name, 'lvm_cinder_storage_partitions')
1302
1303     def _get_bare_lvm_mount_options(self, host_name):
1304         return self._get_storage_profile_attribute(host_name, 'mount_options')
1305
1306     def _get_bare_lvm_mount_dir(self, host_name):
1307         return self._get_storage_profile_attribute(host_name, 'mount_dir')
1308
1309     def _get_bare_lvm_lv_name(self, host_name):
1310         return self._get_storage_profile_attribute(host_name, 'lv_name')
1311
    def _get_instance_lv_percentage(self, host_name):
        """Return the cow LV storage percentage for instance storage.

        Falls back to DEFAULT_INSTANCE_LV_PERCENTAGE when the profile
        lacks the attribute; raises CMError for unsupported backends.
        """
        try:
            if self.instance_default_backend in SUPPORTED_INSTANCE_BACKENDS:
                return self._get_storage_profile_attribute(
                    host_name, 'lvm_instance_cow_lv_storage_percentage')
        except configerror.ConfigError:
            # Attribute missing from the profile: use the default.
            return DEFAULT_INSTANCE_LV_PERCENTAGE
        raise cmerror.CMError(str("Failed to found lvm from storage_profiles"))
1320
1321     def _is_osd_host(self, name):
1322         try:
1323             return bool(name in self._hosts_config_handler.get_service_profile_hosts('storage'))
1324         except configerror.ConfigError:
1325             return False
1326
1327     def _is_rbd_ceph_configured(self, host_name):
1328         return self._is_backend_configured('ceph', host_name)
1329
1330     def _is_lvm_configured(self, host_name):
1331         return self._is_backend_configured('lvm', host_name)
1332
1333     def _is_bare_lvm_configured(self, host_name):
1334         return self._is_backend_configured('bare_lvm', host_name)
1335
1336     def _get_hw_type(self, name):
1337         hwmgmt_addr = self._hosts_config_handler.get_hwmgmt_ip(name)
1338         hwmgmt_user = self._hosts_config_handler.get_hwmgmt_user(name)
1339         hwmgmt_pass = self._hosts_config_handler.get_hwmgmt_password(name)
1340         hwmgmt_priv_level = self._hosts_config_handler.get_hwmgmt_priv_level(name)
1341         return hw.get_hw_type(hwmgmt_addr, hwmgmt_user, hwmgmt_pass, hwmgmt_priv_level)
1342
1343     @staticmethod
1344     def _get_os_disk(hw_type):
1345         return hw.get_os_hd(hw_type)
1346
1347     def _get_osd_disks_for_embedded_deployment(self, host_name):
1348         return self._hosts_config_handler.get_ceph_osd_disks(host_name)
1349
1350     @staticmethod
1351     def _get_osd_disks(hw_type):
1352         return hw.get_hd_with_usage(hw_type, "osd")
1353
1354     def _by_path_disks(self, hw_type, nr_of_disks):
1355         return self._get_osd_disks(hw_type)[0:nr_of_disks]
1356
1357     @staticmethod
1358     def _is_by_path_disks(disk_list):
1359         return [disk for disk in disk_list if "by-path" in disk]
1360
1361     def _get_physical_volumes(self, disk_list):
1362         partition_nr = "1"
1363         if self._is_by_path_disks(disk_list):
1364             return [disk+"-part"+partition_nr for disk in disk_list]
1365         else:
1366             return [disk+partition_nr for disk in disk_list]
1367
    def _initialize_host_object(self, name):
        """Build and return a fully populated Host object for *name*.

        Role flags come from the config handlers, disk layout from the
        detected hardware type and the host's storage profiles.
        """
        host = Host()
        host.name = name
        # Role flags (non-exclusive). NOTE(review): helper is spelled
        # _is_host_managment (sic) elsewhere in this file.
        host.is_mgr = self._is_host_managment(host.name)
        host.is_controller = self._is_host_controller(host.name)
        host.is_compute = self._is_host_compute(host.name)
        host.is_storage = self._is_host_storage(host.name)
        host.is_rbd_ceph = self._is_rbd_ceph_configured(host.name)
        host.is_lvm = self._is_lvm_configured(host.name)
        host.is_bare_lvm = self._is_bare_lvm_configured(host.name)
        host.is_osd = self._is_osd_host(host.name)
        # Every mgr host also acts as a ceph mon.
        host.is_mon = host.is_mgr
        hw_type = self._get_hw_type(name)
        host.os_disk = self._get_os_disk(hw_type)
        if host.is_bare_lvm:
            # Bare-LVM: one by-path disk per configured partition.
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.bare_lvm_disks = self._by_path_disks(hw_type, len(partitions))
            host.bare_lvm_physical_volumes = self._get_physical_volumes(host.bare_lvm_disks)
            host.mount_options = self._get_bare_lvm_mount_options(host.name)
            host.mount_dir = self._get_bare_lvm_mount_dir(host.name)
            host.bare_lvm_lv_name = self._get_bare_lvm_lv_name(host.name)

        if host.is_compute and self.instance_default_backend != 'rbd':
            host.vg_percentage = INSTANCE_NODE_VG_PERCENTAGE

        if self.is_lvm_backend and host.is_controller:
            # Cinder disks follow the nova disks in the by-path listing,
            # so take the tail of the combined disk range.
            nr_of_cinder_disks = int(len(self._get_lvm_cinder_storage_partitions(host.name)))
            nr_of_nova_disks = int(len(self._get_lvm_instance_storage_partitions(host.name)))
            nr_of_all_disks = nr_of_cinder_disks + nr_of_nova_disks
            if nr_of_nova_disks > 0:
                host.cinder_disks = \
                    self._by_path_disks(hw_type, nr_of_all_disks)[-nr_of_cinder_disks:]
            else:
                host.cinder_disks = self._by_path_disks(hw_type, nr_of_cinder_disks)
            host.cinder_physical_volumes = self._get_physical_volumes(host.cinder_disks)

        if host.is_rbd_ceph:
            nr_of_osd_disks = self._get_nr_of_ceph_osd_disks(host.name)
            if self._caas_config_handler.is_vnf_embedded_deployment():
                host.ceph_osd_disks = \
                    self._get_osd_disks_for_embedded_deployment(host.name)[0:nr_of_osd_disks]
            else:
                host.ceph_osd_disks = self._get_osd_disks(hw_type)[0:nr_of_osd_disks]
            # 1-based OSD ids. NOTE(review): a list on Python 2; under
            # Python 3 this is a range object -- confirm consumers cope.
            host.osd_disks_ids = range(1, nr_of_osd_disks+1)

        if host.is_lvm and host.is_compute:
            partitions = self._get_lvm_instance_storage_partitions(host.name)
            host.instance_disks = self._by_path_disks(hw_type, len(partitions))
            host.instance_physical_volumes = self._get_physical_volumes(host.instance_disks)
            host.instance_lv_percentage = self._get_instance_lv_percentage(host.name)
        return host