---
# The purpose of this file is to define environment-specific parameters for
# ceph-osd
schema: armada/Chart/v1
metadata:
  schema: metadata/Document/v1
  name: tenant-ceph-osd
  layeringDefinition:
    abstract: false
    layer: site
    parentSelector:
      name: tenant-ceph-osd-global
    actions:
      - method: replace
        path: .values.conf.storage.osd
      - method: merge
        path: .
  storagePolicy: cleartext
data:
  values:
    labels:
      osd:
        node_selector_key: tenant-ceph-osd
        node_selector_value: enabled
    conf:
      storage:
        failure_domain: "rack"
        # NEWSITE-CHANGEME: The OSD count and configuration here should not
        # need to change if your HW matches the HW used in this environment.
        # Otherwise you may need to add or subtract disks from this list.
        # There is no need to create the journal partitions listed below; the
        # Ceph charts will create them. The default journal partition size
        # is 10GB.
        osd:
{% for osd in yaml.tenant_storage.osds %}
          - data:
              type: block-logical
              location: {{osd.data}}
            journal:
              type: block-logical
              location: {{osd.journal}}
{% endfor %}
      overrides:
        ceph_osd:
          hosts:
            - name: {{yaml.genesis.name}}
              conf:
                storage:
                  failure_domain_name: "{{yaml.genesis.name}}_rack"
{% for server in yaml.masters %}
            - name: {{server.name}}
              conf:
                storage:
                  failure_domain_name: "{{server.name}}_rack"
{% endfor %}
{% if 'workers' in yaml %}{% for server in yaml.workers %}
            - name: {{server.name}}
              conf:
                storage:
                  failure_domain_name: "{{server.name}}_rack"
{% endfor %}{% endif %}
...
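# For reference, a minimal sketch of the site-definition data this template
# consumes when rendered. The key layout follows the references above
# (yaml.genesis, yaml.masters, yaml.workers, yaml.tenant_storage.osds); the
# hostnames and device paths are hypothetical examples, not values from this
# environment:
#
#   genesis:
#     name: genesis-node
#   masters:
#     - name: master-01
#     - name: master-02
#   workers:
#     - name: worker-01
#   tenant_storage:
#     osds:
#       - data: /dev/sdb
#         journal: /dev/sdf1
#       - data: /dev/sdc
#         journal: /dev/sdf2
#
# Each entry in tenant_storage.osds renders one block-logical OSD with its
# journal device, and each host name renders one per-host override of the
# form failure_domain_name: "<hostname>_rack".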