X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;f=site_type%2Fsriov-a13%2Ftemplates%2Fsoftware%2Fcharts%2Fosh%2Fopenstack-tenant-ceph%2Fceph-osd.j2;fp=site_type%2Fsriov-a13%2Ftemplates%2Fsoftware%2Fcharts%2Fosh%2Fopenstack-tenant-ceph%2Fceph-osd.j2;h=d88da7cc30f7e28be40507dea4496689d78f412e;hb=fbb206730195c6f03ded7658d08f1ef708ebf88b;hp=0000000000000000000000000000000000000000;hpb=3395a537e26721ec33a80f66686ca932f9328722;p=yaml_builds.git diff --git a/site_type/sriov-a13/templates/software/charts/osh/openstack-tenant-ceph/ceph-osd.j2 b/site_type/sriov-a13/templates/software/charts/osh/openstack-tenant-ceph/ceph-osd.j2 new file mode 100644 index 0000000..d88da7c --- /dev/null +++ b/site_type/sriov-a13/templates/software/charts/osh/openstack-tenant-ceph/ceph-osd.j2 @@ -0,0 +1,61 @@ +--- +# The purpose of this file is to define environment-specific parameters for +# ceph-osd +schema: armada/Chart/v1 +metadata: + schema: metadata/Document/v1 + name: tenant-ceph-osd + layeringDefinition: + abstract: false + layer: site + parentSelector: + name: tenant-ceph-osd-global + actions: + - method: replace + path: .values.conf.storage.osd + - method: merge + path: . + storagePolicy: cleartext +data: + values: + labels: + osd: + node_selector_key: tenant-ceph-osd + node_selector_value: enabled + conf: + storage: + failure_domain: "rack" + # NEWSITE-CHANGEME: The OSD count and configuration here should not need + # to change if your HW matches the HW used in this environment. + # Otherwise you may need to add or subtract disks to this list. 
+      # no need to create the journal partitions below as the ceph charts will create them
+      # default size of journal partitions is 10GB
+      osd:
+{% for osd in yaml.tenant_storage.osds %}
+        - data:
+            type: block-logical
+            location: {{osd.data}}
+          journal:
+            type: block-logical
+            location: {{osd.journal}}
+{% endfor %}
+    overrides:
+      ceph_osd:
+        hosts:
+          - name: {{yaml.genesis.name}}
+            conf:
+              storage:
+                failure_domain_name: "{{yaml.genesis.name}}_rack"
+{% for server in yaml.masters %}
+          - name: {{server.name}}
+            conf:
+              storage:
+                failure_domain_name: "{{server.name}}_rack"
+{% endfor %}
+{% if 'workers' in yaml %}{% for server in yaml.workers %}
+          - name: {{server.name}}
+            conf:
+              storage:
+                failure_domain_name: "{{server.name}}_rack"
+{% endfor %}{% endif %}
+...