--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+
+site_name: akraino-ki20
+site_type: ovsdpdk-a13
+ipmi_admin:
+ username: root
+ password: calvin
+networks:
+  bonded: true
+ primary: bond0
+ slaves:
+ - name: enp95s0f0
+ - name: enp95s0f1
+ oob:
+ vlan: 400
+ interface:
+ cidr: 10.51.35.128/27
+ netmask: 255.255.255.224
+ routes:
+ gateway: 10.51.35.129
+ ranges:
+ reserved:
+ start: 10.51.35.153
+ end: 10.51.35.158
+ static:
+ start: 10.51.35.132
+ end: 10.51.35.152
+ host:
+ vlan: 408
+ interface: bond0.408
+ cidr: 10.51.34.224/27
+ subnet: 10.51.34.224
+ netmask: 255.255.255.224
+ ingress_vip: 10.51.34.236
+ maas_vip: 10.51.34.235
+ routes:
+ gateway: 10.51.34.225
+ ranges:
+ reserved:
+ start: 10.51.34.226
+ end: 10.51.34.228
+ static:
+ start: 10.51.34.229
+ end: 10.51.34.236
+ storage:
+ vlan: 23
+ interface: bond0.23
+ cidr: 10.224.174.0/24
+ #netmask: 255.255.255.0 - Not Used
+ ranges:
+ reserved:
+ start: 10.224.174.1
+ end: 10.224.174.10
+ static:
+ start: 10.224.174.11
+ end: 10.224.174.254
+ pxe:
+ vlan: 407
+ interface: eno3
+ cidr: 10.224.168.0/24
+ #netmask: 255.255.255.0 - Not Used
+ gateway: 10.224.168.1
+ routes:
+ gateway: 10.224.168.11 #This address is the PXE of the Genesis Node.
+ ranges:
+ reserved:
+ start: 10.224.168.1
+ end: 10.224.168.10
+ static:
+ start: 10.224.168.11
+ end: 10.224.168.200
+ dhcp:
+ start: 10.224.168.201
+ end: 10.224.168.254
+ ksn:
+ vlan: 22
+ interface: bond0.22
+ cidr: 10.224.160.0/24
+ #netmask: 255.255.255.0 - Not Used
+ gateway: 10.224.160.1
+ local_asnumber: 65531
+ ranges:
+ reserved:
+ start: 10.224.160.1
+ end: 10.224.160.10
+ static:
+ start: 10.224.160.134
+ end: 10.224.160.254
+ additional_cidrs:
+ - 10.224.160.200/29
+ ingress_vip: 10.224.160.201/32
+# peers:
+# - ip: 10.224.160.129 #Old QFX VC VIP
+# - ip: 10.224.160.131
+# - ip: 10.224.160.130
+# scope: global
+# asnumber: 65001
+
+# peers:
+# - ip: 10.224.160.129 #Old QFX VC VIP
+# - ip: 10.224.160.131
+# - ip: 10.224.160.130
+# scope: global
+# asnumber: 65001
+
+# peers:
+# - ip: 10.224.160.129 #Old QFX VC VIP
+# - ip: 10.224.160.131
+# - ip: 10.224.160.130
+# scope: global
+# asnumber: 65001
+    vrrp_ip: 10.224.160.129  # keep the peer's IP address when there is only one peer
+ neutron:
+ vlan: 24
+ interface: bond0.24
+ cidr: 10.224.171.0/24
+ #netmask: 255.255.255.0 - Not Used
+ ranges:
+ reserved:
+ start: 10.224.171.1
+ end: 10.224.171.10
+ static:
+ start: 10.224.171.11
+ end: 10.224.171.254
+ vxlan:
+ vlan: 1
+ interface: enp134s0f0
+ cidr: 10.224.169.0/24
+ #netmask: 255.255.255.0 - Not Used
+ ranges:
+ reserved:
+ start: 10.224.169.1
+ end: 10.224.169.10
+ static:
+ start: 10.224.169.11
+ end: 10.224.169.254
+dns:
+ upstream_servers:
+ - 10.51.34.231
+ - 8.8.8.8
+ ingress_domain: vran.k2.ericsson.se
+ domain: vran.k2.ericsson.se
+#gpu:
+# alias:
+# - name: "P4"
+# product_id: "1bb2"
+# vendor_id: "10de"
+# - name: "P40"
+# product_id: "1b38"
+# vendor_id: "10de"
+# - name: "P100"
+# product_id: "15f8"
+# vendor_id: "10de"
+# - name: "V100"
+# product_id: "1db4"
+# vendor_id: "10de"
+dpdk:
+ nics:
+ - name: dpdk0
+ pci_id: '0000:86:00.0'
+ bridge: br-phy
+ migrate_ip: true
+storage:
+ osds:
+ - data: /dev/sda
+ journal: /dev/sdh1
+ - data: /dev/sdb
+ journal: /dev/sdh2
+ - data: /dev/sdc
+ journal: /dev/sdh3
+ osd_count: 3
+ total_osd_count: 9
+tenant_storage:
+ osds:
+ - data: /dev/sdd
+ journal: /dev/sdh4
+ - data: /dev/sde
+ journal: /dev/sdh5
+ - data: /dev/sdf
+ journal: /dev/sdh6
+ osd_count: 3
+genesis:
+  name: aknode25
+ oob: 10.51.35.144
+ host: 10.51.34.232
+ storage: 10.224.174.11
+ pxe: 10.224.168.11
+ ksn: 10.224.160.134
+ neutron: 10.224.171.11
+ vxlan: 10.224.169.11
+ root_password: akraino,d
+# bios_template: dell_r740_g14_uefi_base.xml.template
+# boot_template: dell_r740_g14_uefi_httpboot.xml.template
+# http_boot_device: NIC.Slot.2-1-1
+masters:
+ - name: aknode31
+ oob: 10.51.35.147
+ host: 10.51.34.229
+ storage: 10.224.174.13
+ pxe: 10.224.168.13
+ ksn: 10.224.160.136
+ neutron: 10.224.171.13
+ vxlan: 10.224.169.13
+ oob_user: root
+ oob_password: calvin
+  - name: aknode23
+ oob: 10.51.35.143
+ host: 10.51.34.233
+ storage: 10.224.174.12
+ pxe: 10.224.168.12
+ ksn: 10.224.160.135
+ neutron: 10.224.171.12
+ vxlan: 10.224.169.12
+ oob_user: root
+ oob_password: calvin
+  - name: aknode29
+ oob: 10.51.35.146
+ host: 10.51.34.230
+ storage: 10.224.174.14
+ pxe: 10.224.168.14
+ ksn: 10.224.160.137
+ neutron: 10.224.171.14
+ vxlan: 10.224.169.14
+ oob_user: root
+ oob_password: calvin
+#workers:
+# - name : aknode43
+# oob: 192.168.41.43
+# host: 192.168.2.43
+# storage: 172.31.2.43
+# pxe: 172.30.2.43
+# ksn: 172.29.1.43
+# neutron: 10.0.102.43
+platform:
+# vcpu_pin_set: "4-21,26-43,48-65,72-87"
+ kernel_params:
+ kernel_package: 'linux-image-4.15.0-66-generic'
+ hugepagesz: '1G'
+ hugepages: 32
+# default_hugepagesz: '1G'
+# transparent_hugepage: 'never'
+ iommu: 'pt'
+ intel_iommu: 'on'
+# amd_iommu: 'on'
+# console: 'ttyS1,115200n8'
+hardware:
+ vendor: DELL
+ generation: '10'
+ hw_version: '3'
+ bios_version: '2.8'
+ bios_template:
+ boot_template: dell_r740_g14_uefi_httpboot.xml.template
+ http_boot_device: NIC.Slot.2-1-1
+ device_aliases:
+ ## network
+ - name: eno3
+ key: pxe_nic01
+ address: '0000:01:00.0'
+ dev_type: 'I350 Gigabit Network Connection'
+ bus_type: 'pci'
+ - name: enp95s0f0
+ key: data_nic01
+ address: '0000:5f:00.0'
+ dev_type: 'Ethernet 10G 2P X520 Adapter'
+ bus_type: 'pci'
+ - name: enp95s0f1
+ key: data_nic02
+ address: '0000:5f:00.1'
+ dev_type: 'Ethernet 10G 2P X520 Adapter'
+ bus_type: 'pci'
+ - name: enp134s0f0
+ key: dpdk_nic01
+ address: '0000:86:00.0'
+ dev_type: 'Ethernet Controller XXV710'
+ bus_type: 'pci'
+ ## storage - use "dmesg | grep -Pe 'sd \d:\d'" to find address of drives
+ - name: /dev/sdg
+ key: bootdisk
+ address: '0:2.0.0'
+ dev_type: 'PERC H730P'
+ bus_type: 'scsi'
+ - name: /dev/sdh
+ key: cephjournal1
+ address: '0:2.1.0'
+ dev_type: 'PERC H730P'
+ bus_type: 'scsi'
+# - name: /dev/sdi
+# key: cephjournal2
+# address: '0:2.2.0'
+# dev_type: 'PERC H730P'
+# bus_type: 'scsi'
+# - name: /dev/sdj
+# key: ephemeral
+# address: '0:2.3.0'
+# dev_type: 'PERC H730P'
+# bus_type: 'scsi'
+disks:
+# - name : sdg
+  - name: bootdisk
+ labels:
+ bootdrive: 'true'
+ partitions:
+ - name: root
+ size: 30g
+ bootable: true
+ mountpoint: /
+ - name: boot
+ size: 1g
+ mountpoint: /boot
+ - name: var
+ size: '300g'
+ mountpoint: /var
+disks_compute:
+# - name : sdg
+  - name: bootdisk
+ labels:
+ bootdrive: 'true'
+ partitions:
+ - name: root
+ size: 30g
+ bootable: true
+ mountpoint: /
+ - name: boot
+ size: 1g
+ mountpoint: /boot
+ - name: var_log
+ size: '100g'
+ mountpoint: /var/log
+ - name: var
+ size: '>100g'
+ mountpoint: /var
+  - name: ephemeral
+ partitions:
+ - name: nova
+ size: 99%
+ mountpoint: /var/lib/nova
+genesis_ssh_public_key:
+kubernetes:
+ api_service_ip: 10.96.0.1
+ etcd_service_ip: 10.96.0.2
+ pod_cidr: 10.98.0.0/16
+ service_cidr: 10.96.0.0/15
+regional_server:
+ ip: 10.51.34.231
+...
--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+# This file defines a boot action for MaaS that deploys the calico-ip-rules
+# script to nodes, registers it with systemd, and runs it on all PXE-booted
+# nodes.
+# On the genesis node, this is a manual step detailed in deployment documentation.
+
+# NOTE: This is a copy from `aic-clcp-manifests/type/cruiser/v4.0/`, because
+# this is an upstream manifest based on airship-treasuremap, which does not
+# have bgp VIP configuration scripts.
+schema: 'drydock/BootAction/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: calico-ip-rules
+ storagePolicy: 'cleartext'
+ layeringDefinition:
+ abstract: false
+ layer: site
+ labels:
+ application: 'drydock'
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .calico.ip_rule.gateway
+ dest:
+ path: .assets[0].data
+ pattern: DH_SUB_GATEWAY_IP
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .kubernetes.pod_cidr
+ dest:
+ path: .assets[0].data
+ pattern: DH_SUB_POD_CIDR
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .calico.bgp.ipv4.public_service_cidr
+ dest:
+ path: .assets[0].data
+ pattern: DH_SUB_INGRESS_CIDR
+ # Substitution of the configure-ip-rules script into this bootaction
+ - src:
+ schema: pegleg/Script/v1
+ name: configure-ip-rules
+ path: .
+ dest:
+ path: .assets[1].data
+data:
+ signaling: false
+ assets:
+ - path: /etc/systemd/system/configure-ip-rules.service
+ type: unit
+ permissions: '444'
+ data: |-
+ [Unit]
+ Description=IP Rules Initialization Service
+ After=network-online.target local-fs.target
+ [Service]
+ Type=simple
+ ExecStart=/opt/configure-ip-rules.sh -g DH_SUB_GATEWAY_IP -c DH_SUB_POD_CIDR -s DH_SUB_INGRESS_CIDR
+ [Install]
+ WantedBy=multi-user.target
+ data_pipeline:
+ - utf8_decode
+ - path: /opt/configure-ip-rules.sh
+ type: file
+ permissions: '700'
+ data_pipeline:
+ - utf8_decode
+...
--- /dev/null
+---
+# # Drydock BaremetalNode resources for a specific rack are stored in this file.
+# #
+# # NOTE: For new sites, you should complete the networks/physical/networks.yaml
+# # file before working on this file.
+# #
+# # In this file, you should make the number of `drydock/BaremetalNode/v1`
+# # resources equal the number of bare metal nodes you have, either by deleting
+# # excess BaremetalNode definitions (if there are too many), or by copying and
+# # pasting the last BaremetalNode in the file until you have the correct number
+# # of baremetal nodes (if there are too few).
+# #
+# # Then in each file, address all additional NEWSITE-CHANGEME markers to update
+# # the data in these files with the right values for your new site.
+# #
+# # *NOTE: The Genesis node is counted as one of the control plane nodes. Note
+# # that the Genesis node does not appear on this bare metal list, because the
+# # procedure to reprovision the Genesis host with MaaS has not yet been
+# # implemented. Therefore there will be only three bare metal nodes in this file
+# # with the 'masters' tag, as the genesis roles are assigned in a different
+# # place (profiles/genesis.yaml).
+# # NOTE: The host profiles for the control plane are further divided into two
+# # variants: primary and secondary. The only significance this has is that the
+# # "primary" nodes are active Ceph nodes, whereas the "secondary" nodes are Ceph
+# # standby nodes. For Ceph quorum, this means that the control plane split will
+# # be 3 primary + 1 standby host profile, and the Genesis node counts toward one
+# # of the 3 primary profiles. Other control plane services are not affected by
+# # primary vs secondary designation.
+# #
+# # TODO: Include the hostname naming convention
+# #
+# schema: 'drydock/BaremetalNode/v1'
+# metadata:
+ # schema: 'metadata/Document/v1'
+ # # NEWSITE-CHANGEME: Replace with the hostname of the first node in the rack,
+ # # after (excluding) genesis.
+ # name: cab23-r720-12
+ # layeringDefinition:
+ # abstract: false
+ # layer: site
+ # storagePolicy: cleartext
+# data:
+ # # NEWSITE-CHANGEME: The IPv4 address assigned to each logical network on this
+ # # node. In the reference Airship deployment, this is all logical Networks defined
+ # # in networks/physical/networks.yaml. IP addresses are manually assigned, by-hand.
+ # # (what could possibly go wrong!) The instructions differ for each logical
+ # # network, which are laid out below.
+ # addressing:
+ # # The iDrac/iLo IP of the node. It's important that this match up with the
+ # # node's hostname above, so that the rack number and node position encoded
+ # # in the hostname are accurate and matching the node that IPMI operations
+ # # will be performed against (for poweron, poweroff, PXE boot to wipe disk or
+ # # reconfigure identity, etc - very important to get right for these reasons).
+ # # These addresses should already be assigned to nodes racked and stacked in
+ # # the environment; these are not addresses which MaaS assigns.
+ # - network: oob
+ # address: 10.23.104.12
+ # # The IP of the node on the PXE network. Refer to the static IP range
+ # # defined for the PXE network in networks/physical/networks.yaml. Begin allocating
+ # # IPs from this network, starting with the second IP (inclusive) from the
+ # # allocation range of this subnet (Genesis node will have the first IP).
+ # # Ex: If the start IP for the PXE "static" network is 10.23.20.11, then
+ # # genesis will have 10.23.20.11, this node will have 10.23.20.12, and
+ # # so on with incrementing IP addresses with each additional node.
+ # - network: pxe
+ # address: 10.23.20.12
+ # # Genesis node gets first IP, all other nodes increment IPs from there
+ # # within the allocation range defined for the network in
+ # # networks/physical/networks.yaml
+ # - network: oam
+ # address: 10.23.21.12
+ # # Genesis node gets first IP, all other nodes increment IPs from there
+ # # within the allocation range defined for the network in
+ # # networks/physical/networks.yaml
+ # - network: storage
+ # address: 10.23.23.12
+ # # Genesis node gets first IP, all other nodes increment IPs from there
+ # # within the allocation range defined for the network in
+ # # networks/physical/networks.yaml
+ # - network: overlay
+ # address: 10.23.24.12
+ # # Genesis node gets first IP, all other nodes increment IPs from there
+ # # within the allocation range defined for the network in
+ # # networks/physical/networks.yaml
+ # - network: calico
+ # address: 10.23.22.12
+ # # NEWSITE-CHANGEME: Set the host profile for the node.
+ # # Note that there are different host profiles depending if this is a control
+ # # plane vs data plane node, and different profiles that map to different types
+ # # hardware. Control plane host profiles are further broken down into "primary"
+ # # and "secondary" profiles (refer to the Notes section at the top of this doc).
+ # # Select the host profile that matches up to your type of
+ # # hardware and function. E.g., the r720 here refers to Dell R720 hardware, the
+ # # 'cp' refers to a control plane profile, and the "primary" means it will be
+ # # an active member in the ceph quorum. Refer to profiles/host/ for the list
+ # # of available host profiles specific to this site (otherwise, you may find
+ # # a general set of host profiles at the "type" or "global" layers/folders.
+ # # If you have hardware that is not on this list of profiles, you may need to
+ # # create a new host profile for that hardware.
+ # # Regarding control plane vs other data plane profiles, refer to the notes at
+ # # the beginning of this file. There should be one control plane node per rack,
+ # # including Genesis. Note Genesis won't actually be listed in this file as a
+ # # BaremetalNode, but the rest are.
+ # # This is the second "primary" control plane node after Genesis.
+ # host_profile: cp_r720-primary
+ # metadata:
+ # tags:
+ # # NEWSITE-CHANGEME: See previous comment. Apply 'masters' tag for control
+ # # plane node, and 'workers' tag for data plane hosts.
+ # - 'masters'
+ # # NEWSITE-CHANGEME: Refer to site engineering package or other supporting
+ # # documentation for the specific rack name. This should be a rack name that
+ # # is meaningful to data center personnel (i.e. a rack they could locate if
+ # # you gave them this rack designation).
+ # rack: cab23
+# ...
+{% for server in yaml.masters %}
+{% if loop.index > 1 %}
+---
+{% endif %}
+schema: 'drydock/BaremetalNode/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ # NEWSITE-CHANGEME: The next node's hostname
+ name: {{server.name}}
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: The next node's IPv4 addressing
+ addressing:
+ - network: oob
+ address: {{server.oob}}
+ - network: pxe
+ address: {{server.pxe}}
+ - network: oam
+ address: {{server.host}}
+ - network: storage
+ address: {{server.storage}}
+ - network: overlay
+ address: {{server.neutron}}
+ - network: calico
+ address: {{server.ksn}}
+ - network: dpdk
+ address: {{server.vxlan}}
+ # NEWSITE-CHANGEME: The next node's host profile
+{% if loop.index < 3 %}
+ host_profile: cp_r720-primary
+{% else %}
+ host_profile: cp_r720-secondary
+{% endif %}
+ metadata:
+ # NEWSITE-CHANGEME: The next node's rack designation
+ rack: cab23
+    # NEWSITE-CHANGEME: The next node's role designation
+ tags:
+ - 'masters'
+{% if 'platform' in yaml %}
+ platform:
+ kernel_params:
+{% for key, value in yaml.platform.kernel_params.items() %}
+ {{key}}: '{{value}}'
+{% endfor %}
+{% if 'vcpu_pin_set' in yaml.platform %}
+ isolcpus: '{{yaml.platform.vcpu_pin_set}}'
+{% endif %}
+{% endif %}
+...
+{% endfor %}
+{% if 'workers' in yaml %}{% for server in yaml.workers %}
+---
+schema: 'drydock/BaremetalNode/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ # NEWSITE-CHANGEME: The next node's hostname
+  name: {{server.name}}
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: The next node's IPv4 addressing
+ addressing:
+ - network: oob
+ address: {{server.oob}}
+ - network: pxe
+ address: {{server.pxe}}
+ - network: oam
+ address: {{server.host}}
+ - network: storage
+ address: {{server.storage}}
+ - network: overlay
+ address: {{server.neutron}}
+ - network: calico
+ address: {{server.ksn}}
+ - network: dpdk
+ address: {{server.vxlan}}
+ # NEWSITE-CHANGEME: The next node's host profile
+  # This is a data plane (worker) host profile
+ host_profile: dp_r720
+ metadata:
+ # NEWSITE-CHANGEME: The next node's rack designation
+ rack: cab23
+    # NEWSITE-CHANGEME: The next node's role designation
+ tags:
+ - 'workers'
+{% if 'platform' in yaml %}
+ platform:
+ kernel_params:
+{% for key, value in yaml.platform.kernel_params.items() %}
+ {{key}}: '{{value}}'
+{% endfor %}
+{% if 'vcpu_pin_set' in yaml.platform %}
+ isolcpus: '{{yaml.platform.vcpu_pin_set}}'
+{% endif %}
+{% endif %}
+...
+{% endfor %}{% endif %}
--- /dev/null
+---
+# The purpose of this file is to define network related paramters that are
+# referenced elsewhere in the manifests for this site.
+#
+# TODO: Include bare metal host FQDN naming standards
+# TODO: Include ingress FQDN naming standards
+schema: pegleg/CommonAddresses/v1
+metadata:
+ schema: metadata/Document/v1
+ name: common-addresses
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ calico:
+ # NEWSITE-CHANGEME: The interface that calico will use. Update if your
+ # logical bond interface name or calico VLAN have changed from the reference
+ # site design.
+ # This should be whichever
+ # bond and VLAN number specified in networks/physical/networks.yaml for the Calico
+ # network. E.g. VLAN 22 for the calico network as a member of bond0, you
+ # would set "interface=bond0.22" as shown here.
+ ip_autodetection_method: interface={{yaml.networks.ksn.interface}}
+ etcd:
+ # etcd service IP address
+ service_ip: 10.96.232.136
+ ip_rule:
+    gateway: {{yaml.networks.ksn.gateway}}
+ bgp:
+ ipv4:
+ public_service_cidr: {{yaml.networks.ksn.additional_cidrs[0]}}
+ ingress_vip: {{yaml.networks.ksn.ingress_vip}}
+{% if ('peers' in yaml.networks.ksn and yaml.networks.ksn.peers is not none and yaml.networks.ksn.peers is iterable ) %}
+ peers:
+{% for peer in yaml.networks.ksn.peers %}
+ - {{peer.ip}}
+{% endfor %}
+{% endif %}
+ vip:
+ ingress_vip: '{{yaml.networks.host.ingress_vip}}/32'
+ maas_vip: '{{yaml.networks.host.maas_vip}}/32'
+
+ dns:
+ # Kubernetes cluster domain. Do not change. This is internal to the cluster.
+ cluster_domain: cluster.local
+ # DNS service ip
+ service_ip: 10.96.0.10
+ # List of upstream DNS forwards. Verify you can reach them from your
+ # environment. If so, you should not need to change them.
+ upstream_servers:
+{% for server in yaml.dns.upstream_servers %}
+ - {{server}}
+{% endfor %}
+  # Repeat the same values as above, but formatted as a comma-separated
+ # string
+ upstream_servers_joined: '{{yaml.dns.upstream_servers|batch(2)|first|join(',')}}'
+ # NEWSITE-CHANGEME: FQDN for ingress (i.e. "publicly facing" access point)
+ # Choose FQDN according to the ingress/public FQDN naming conventions at
+ # the top of this document.
+ ingress_domain: {{yaml.dns.ingress_domain}}
+
+ genesis:
+ # NEWSITE-CHANGEME: Update with the hostname for the node which will take on
+  # the Genesis role. Refer to the hostname naming standards in
+ # networks/physical/networks.yaml
+ # NOTE: Ensure that the genesis node is manually configured with this
+ # hostname before running `genesis.sh` on the node.
+ hostname: {{yaml.genesis.name}}
+ # NEWSITE-CHANGEME: Calico IP of the Genesis node. Use the "start" value for
+ # the calico network defined in networks/physical/networks.yaml for this IP.
+ ip: {{yaml.genesis.ksn}}
+
+ bootstrap:
+ # NEWSITE-CHANGEME: Update with the "start" value/IP of the static range
+ # defined for the pxe network in networks/physical/networks.yaml
+ ip: {{yaml.genesis.pxe}}
+
+ kubernetes:
+ # K8s API service IP
+ api_service_ip: {{yaml.kubernetes.api_service_ip}}
+ # etcd service IP
+ etcd_service_ip: {{yaml.kubernetes.etcd_service_ip}}
+ # k8s pod CIDR (network which pod traffic will traverse)
+ pod_cidr: {{yaml.kubernetes.pod_cidr}}
+ # k8s service CIDR (network which k8s API traffic will traverse)
+ service_cidr: {{yaml.kubernetes.service_cidr}}
+ # misc k8s port settings
+ apiserver_port: 6443
+ haproxy_port: 6553
+ service_node_port_range: 30000-32767
+
+ # etcd port settings
+ etcd:
+ container_port: 2379
+ haproxy_port: 2378
+
+ # NEWSITE-CHANGEME: A list of nodes (apart from Genesis) which act as the
+ # control plane servers. Ensure that this matches the nodes with the 'masters'
+ # tags applied in baremetal/nodes.yaml
+ masters:
+{% for master in yaml.masters %}
+ - hostname: {{master.name}}
+{% endfor %}
+
+ # NEWSITE-CHANGEME: Environment proxy information.
+ # NOTE: Reference Airship sites do not deploy behind a proxy, so this proxy section
+ # should be commented out.
+ # However if you are in a lab that requires proxy, ensure that these proxy
+ # settings are correct and reachable in your environment; otherwise update
+ # them with the correct values for your environment.
+ proxy:
+ http: ""
+ https: ""
+ no_proxy: []
+
+ node_ports:
+ drydock_api: 30000
+ maas_api: 30001
+ maas_proxy: 31800 # hardcoded in MAAS
+
+ ntp:
+ # comma separated NTP server list. Verify that these upstream NTP servers are
+ # reachable in your environment; otherwise update them with the correct
+ # values for your environment.
+ servers_joined: '0.ubuntu.pool.ntp.org,1.ubuntu.pool.ntp.org,2.ubuntu.pool.ntp.org,4.ubuntu.pool.ntp.org'
+
+ # NOTE: This will be updated soon
+ ldap:
+ # NEWSITE-CHANGEME: FQDN for LDAP. Update to the FQDN that is
+ # relevant for your type of deployment (test vs prod values, etc).
+ base_url: 'ldap.example.com'
+ # NEWSITE-CHANGEME: As above, with the protocol included to create a full URI
+ url: 'ldap://ldap.example.com'
+ # NEWSITE-CHANGEME: Update to the correct expression relevant for this
+ # deployment (test vs prod values, etc)
+ auth_path: DC=test,DC=test,DC=com?sAMAccountName?sub?memberof=CN=test,OU=Application,OU=Groups,DC=test,DC=test,DC=com
+ # NEWSITE-CHANGEME: Update to the correct AD group that contains the users
+ # relevant for this deployment (test users vs prod users/values, etc)
+ common_name: test
+ # NEWSITE-CHANGEME: Update to the correct subdomain for your type of
+ # deployment (test vs prod values, etc)
+ subdomain: test
+ # NEWSITE-CHANGEME: Update to the correct domain for your type of
+ # deployment (test vs prod values, etc)
+ domain: example
+
+ storage:
+ ceph:
+ # NEWSITE-CHANGEME: CIDRs for Ceph. Update to match the network CIDR
+ # used for the `storage` network in networks/physical/networks.yaml
+ public_cidr: '{{yaml.networks.storage.cidr}}'
+ cluster_cidr: '{{yaml.networks.storage.cidr}}'
+
+ neutron:
+ # NEWSITE-CHANGEME: Overlay network for VM traffic. Ensure the bond name and
+ # VLAN number are consistent with what's defined for the bond and the overlay
+ # network in networks/physical/networks.yaml
+ tunnel_device: '{{yaml.networks.neutron.interface}}'
+ # bond which the overlay is a member of. Ensure the bond name is consistent
+ # with the bond assigned to the overlay network in
+ # networks/physical/networks.yaml
+ external_iface: '{{yaml.networks.primary}}'
+
+ openvswitch:
+ # bond which the overlay is a member of. Ensure the bond name is consistent
+ # with the bond assigned to the overlay network in
+ # networks/physical/networks.yaml
+ external_iface: '{{yaml.networks.primary}}'
+...
--- /dev/null
+---
+# The purpose of this file is to define all of the NetworkLinks (i.e. layer 1
+# devices) and Networks (i.e. layer 3 configurations). The following is standard
+# for the logical networks in Airship:
+#
+# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
+# | Network | | Per-rack or | | | VLAN tagged |
+# | Name | Purpose | per-site CIDR? | Has gateway? | Bond | or untagged? |
+# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
+# | oob | Out of Band devices (iDrac/iLo) | per-site CIDR | Has gateway | No bond, N/A | Untagged/Native |
+# | pxe | PXE boot network | per-site CIDR | No gateway | No bond, no LACP fallback. Dedicated PXE interface | Untagged/Native |
+# | oam | management network | per-site CIDR | Has gateway | member of bond0 | tagged |
+# | storage | storage network | per-site CIDR | No gateway | member of bond0 | tagged |
+# | calico | underlay calico net; k8s traffic | per-site CIDR | No gateway | member of bond0 | tagged |
+# | overlay | overlay network for openstack SDN | per-site CIDR | No gateway | member of bond0 | tagged |
+# +----------+-----------------------------------+----------------+--------------+----------------------------------------------------+-----------------+
+#
+# For standard Airship deployments, you should not need to modify the number of
+# NetworkLinks and Networks in this file. Only the IP addresses and CIDRs should
+# need editing.
+#
+# TODO: Given that we expect all network broadcast domains to span all racks in
+# Airship, we should choose network names that do not include the rack number.
+#
+# TODO: FQDN naming standards for hosts
+#
+schema: 'drydock/NetworkLink/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: oob
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+  # MaaS doesn't own this network like it does the others, so the noconfig label
+ # is specified.
+ labels:
+ noconfig: enabled
+ bonding:
+ mode: disabled
+ mtu: 1500
+ linkspeed: auto
+ trunking:
+ mode: disabled
+ default_network: oob
+ allowed_networks:
+ - oob
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: oob
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Update with the site's out-of-band CIDR
+ cidr: {{yaml.networks.oob.cidr}}
+ routes:
+ # NEWSITE-CHANGEME: Update with the site's out-of-band gateway IP
+ - subnet: '0.0.0.0/0'
+ gateway: {{yaml.networks.oob.routes.gateway}}
+ metric: 100
+ # NEWSITE-CHANGEME: Update with the site's out-of-band IP allocation range
+ # FIXME: Is this IP range actually used/allocated for anything? The HW already
+ # has its OOB IPs assigned. None of the Ubuntu OS's should need IPs on OOB
+ # network either, as they should be routable via the default gw on OAM network
+ ranges:
+ - type: static
+ start: {{yaml.networks.oob.ranges.static.start}}
+ end: {{yaml.networks.oob.ranges.static.end}}
+...
+---
+schema: 'drydock/NetworkLink/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: pxe
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ bonding:
+ mode: disabled
+ mtu: 1500
+ linkspeed: auto
+ trunking:
+ mode: disabled
+ default_network: pxe
+ allowed_networks:
+ - pxe
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: pxe
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Update with the site's PXE network CIDR
+ # NOTE: The CIDR minimum size = (number of nodes * 2) + 10
+ cidr: {{yaml.networks.pxe.cidr}}
+ routes:
+ - subnet: 0.0.0.0/0
+      # NEWSITE-CHANGEME: Set the PXE network gateway IP address
+ gateway: {{yaml.networks.pxe.routes.gateway}}
+ metric: 100
+ # NOTE: The first 10 IPs in the subnet are reserved for network infrastructure.
+ # The remainder of the range is divided between two subnets of equal size:
+ # one static, and one DHCP.
+ # The DHCP addresses are used when nodes perform a PXE boot (DHCP address gets
+ # assigned), and when a node is commissioning in MaaS (also uses DHCP to get
+ # its IP address). However, when MaaS installs the operating system
+ # ("Deploying/Deployed" states), it will write a static IP assignment to
+ # /etc/network/interfaces[.d] with IPs from the "static" subnet defined here.
+ ranges:
+ # NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
+ - type: reserved
+ start: {{yaml.networks.pxe.ranges.reserved.start}}
+ end: {{yaml.networks.pxe.ranges.reserved.end}}
+ # NEWSITE-CHANGEME: Update to the first half of the remaining range after
+ # excluding the 10 reserved IPs.
+ - type: static
+ start: {{yaml.networks.pxe.ranges.static.start}}
+ end: {{yaml.networks.pxe.ranges.static.end}}
+ # NEWSITE-CHANGEME: Update to the second half of the remaining range after
+ # excluding the 10 reserved IPs.
+ - type: dhcp
+ start: {{yaml.networks.pxe.ranges.dhcp.start}}
+ end: {{yaml.networks.pxe.ranges.dhcp.end}}
+ dns:
+ # NEWSITE-CHANGEME: FQDN for bare metal nodes.
+ # Choose FQDN according to the node FQDN naming conventions at the top of
+ # this document.
+ domain: {% if 'dns' in yaml.networks.pxe and 'domain' in yaml.networks.pxe.dns %}{{yaml.networks.pxe.dns.domain}}
+ {% else %}{{yaml.dns.domain}}
+ {% endif %}
+ # List of upstream DNS forwards. Verify you can reach them from your
+ # environment. If so, you should not need to change them.
+ # TODO: This should be populated via substitution from common-addresses
+ servers: '{% if 'dns' in yaml.networks.pxe %}{{yaml.networks.pxe.dns.servers}}{% else %}{{yaml.dns.upstream_servers|join(' ')}}{% endif %}'
+...
+---
+schema: 'drydock/NetworkLink/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: data
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ bonding:
+{% if yaml.networks.bonded %}
+ mode: 802.3ad
+ hash: layer3+4
+ peer_rate: fast
+ mon_rate: 100
+ up_delay: 1000
+ down_delay: 3000
+{% else %}
+ mode: disabled
+{% endif %}
+ # NEWSITE-CHANGEME: Ensure the network switches in the environment are
+ # configured for this MTU or greater. Even if switches are configured for or
+ # can support a slightly higher MTU, there is no need (and negligible benefit)
+ # to squeeze every last byte into the MTU (e.g., 9216 vs 9100). Leave MTU at
+ # 9000 (the value configured below) for maximum compatibility.
+ mtu: 9000
+ linkspeed: auto
+ trunking:
+ mode: 802.1q
+ allowed_networks:
+ - oam
+ - storage
+ - overlay
+ - calico
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: oam
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Set the VLAN ID which the OAM network is on
+ vlan: '{{yaml.networks.host.vlan}}'
+ mtu: 9000
+ # NEWSITE-CHANGEME: Set the CIDR for the OAM network
+ # NOTE: The CIDR minimum size = number of nodes + 10
+ cidr: {{yaml.networks.host.cidr}}
+ routes:
+ - subnet: 0.0.0.0/0
+ # NEWSITE-CHANGEME: Set the OAM network gateway IP address
+ gateway: {{yaml.networks.host.routes.gateway}}
+ metric: 100
+ ranges:
+ # NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
+ - type: reserved
+ start: {{yaml.networks.host.ranges.reserved.start}}
+ end: {{yaml.networks.host.ranges.reserved.end}}
+ # NEWSITE-CHANGEME: Update to the remaining range after excluding the
+ # 10 reserved IPs.
+ - type: static
+ start: {{yaml.networks.host.ranges.static.start}}
+ end: {{yaml.networks.host.ranges.static.end}}
+ dns:
+ # NEWSITE-CHANGEME: FQDN for bare metal nodes.
+ # Choose FQDN according to the node FQDN naming conventions at the top of
+ # this document.
+ domain: {% if 'dns' in yaml.networks.host and 'domain' in yaml.networks.host.dns %}{{yaml.networks.host.dns.domain}}
+ {% else %}{{yaml.dns.domain}}
+ {% endif %}
+ # List of upstream DNS forwards. Verify you can reach them from your
+ # environment. If so, you should not need to change them.
+ # TODO: This should be populated via substitution from common-addresses
+ servers: '{% if 'dns' in yaml.networks.host %}{{yaml.networks.host.dns.servers}}{% else %}{{yaml.dns.upstream_servers|join(' ')}}{% endif %}'
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: storage
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Set the VLAN ID which the storage network is on
+ vlan: '{{yaml.networks.storage.vlan}}'
+ mtu: 9000
+ # NEWSITE-CHANGEME: Set the CIDR for the storage network
+ # NOTE: The CIDR minimum size = number of nodes + 10
+ cidr: {{yaml.networks.storage.cidr}}
+ ranges:
+ # NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
+ - type: reserved
+ start: {{yaml.networks.storage.ranges.reserved.start}}
+ end: {{yaml.networks.storage.ranges.reserved.end}}
+ # NEWSITE-CHANGEME: Update to the remaining range after excluding the
+ # 10 reserved IPs.
+ - type: static
+ start: {{yaml.networks.storage.ranges.static.start}}
+ end: {{yaml.networks.storage.ranges.static.end}}
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: overlay
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Set the VLAN ID which the overlay network is on
+ vlan: '{{yaml.networks.neutron.vlan}}'
+ mtu: 9000
+ # NEWSITE-CHANGEME: Set the CIDR for the overlay network
+ # NOTE: The CIDR minimum size = number of nodes + 10
+ cidr: {{yaml.networks.neutron.cidr}}
+ ranges:
+ # NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
+ - type: reserved
+ start: {{yaml.networks.neutron.ranges.reserved.start}}
+ end: {{yaml.networks.neutron.ranges.reserved.end}}
+ # NEWSITE-CHANGEME: Update to the remaining range after excluding the
+ # 10 reserved IPs.
+ - type: static
+ start: {{yaml.networks.neutron.ranges.static.start}}
+ end: {{yaml.networks.neutron.ranges.static.end}}
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: calico
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # NEWSITE-CHANGEME: Set the VLAN ID which the calico network is on
+ vlan: '{{yaml.networks.ksn.vlan}}'
+ mtu: 9000
+ # NEWSITE-CHANGEME: Set the CIDR for the calico network
+ # NOTE: The CIDR minimum size = number of nodes + 10
+ cidr: {{yaml.networks.ksn.cidr}}
+ ranges:
+ # NEWSITE-CHANGEME: Update to the first 10 IPs in the CIDR
+ - type: reserved
+ start: {{yaml.networks.ksn.ranges.reserved.start}}
+ end: {{yaml.networks.ksn.ranges.reserved.end}}
+ # NEWSITE-CHANGEME: Update to the remaining range after excluding the
+ # 10 reserved IPs.
+ - type: static
+ start: {{yaml.networks.ksn.ranges.static.start}}
+ end: {{yaml.networks.ksn.ranges.static.end}}
+...
+---
+schema: 'drydock/NetworkLink/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: dpdk
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ bonding:
+ mode: disabled
+ mtu: 1500
+ linkspeed: auto
+ trunking:
+ mode: disabled
+ default_network: dpdk
+ allowed_networks:
+ - dpdk
+...
+---
+schema: 'drydock/Network/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: dpdk
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ mtu: 1500
+ cidr: {{yaml.networks.vxlan.cidr}}
+ ranges:
+ - type: reserved
+ start: {{yaml.networks.vxlan.ranges.reserved.start}}
+ end: {{yaml.networks.vxlan.ranges.reserved.end}}
+ - type: static
+ start: {{yaml.networks.vxlan.ranges.static.start}}
+ end: {{yaml.networks.vxlan.ranges.static.end}}
+...
--- /dev/null
+---
+# The purpose of this file is to define the PKI certificates for the environment
+#
+# NOTE: When deploying a new site, this file should not be configured until
+# baremetal/nodes.yaml is complete.
+#
+schema: promenade/PKICatalog/v1
+metadata:
+ schema: metadata/Document/v1
+ name: cluster-certificates
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ certificate_authorities:
+ kubernetes:
+ description: CA for Kubernetes components
+ certificates:
+ - document_name: apiserver
+ description: Service certificate for Kubernetes apiserver
+ common_name: apiserver
+ hosts:
+ - localhost
+ - 127.0.0.1
+ # FIXME: Repetition of api_service_ip in common-addresses; use
+ # substitution
+ - {{yaml.kubernetes.api_service_ip}}
+ kubernetes_service_names:
+ - kubernetes.default.svc.cluster.local
+
+ # NEWSITE-CHANGEME: The following should be a list of all the nodes in
+ # the environment (genesis, control plane, data plane, everything).
+ # Add/delete from this list as necessary until all nodes are listed.
+ # For each node, the `hosts` list should be comprised of:
+ # 1. The node's hostname, as already defined in baremetal/nodes.yaml
+ # 2. The node's oam IP address, as already defined in baremetal/nodes.yaml
+ # 3. The node's Calico IP address, as already defined in baremetal/nodes.yaml
+ # NOTE: This list also needs to include the Genesis node, which is not
+ # listed in baremetal/nodes.yaml, but by convention should be allocated
+ # the first non-reserved IP in each logical network allocation range
+ # defined in networks/physical/networks.yaml
+ # NOTE: The genesis node needs to be defined twice (the first two entries
+ # on this list) with all of the same parameters except the document_name.
+ # In the first case the document_name is `kubelet-genesis`, and in the
+ # second case the document_name format is `kubelet-YOUR_GENESIS_HOSTNAME`.
+ - document_name: kubelet-genesis
+ common_name: system:node:{{yaml.genesis.name}}
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ groups:
+ - system:nodes
+ - document_name: kubelet-{{yaml.genesis.name}}
+ common_name: system:node:{{yaml.genesis.name}}
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ groups:
+ - system:nodes
+{% for server in yaml.masters %}
+ - document_name: kubelet-{{ server.name }}
+ common_name: system:node:{{ server.name }}
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ groups:
+ - system:nodes
+{% endfor %}
+{% if 'workers' in yaml %}{% for server in yaml.workers %}
+ - document_name: kubelet-{{ server.name }}
+ common_name: system:node:{{ server.name }}
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ groups:
+ - system:nodes
+{% endfor %}{% endif %}
+ # End node list
+ - document_name: scheduler
+ description: Service certificate for Kubernetes scheduler
+ common_name: system:kube-scheduler
+ - document_name: controller-manager
+ description: certificate for controller-manager
+ common_name: system:kube-controller-manager
+ - document_name: admin
+ common_name: admin
+ groups:
+ - system:masters
+ - document_name: armada
+ common_name: armada
+ groups:
+ - system:masters
+ kubernetes-etcd:
+ description: Certificates for Kubernetes's etcd servers
+ certificates:
+ - document_name: apiserver-etcd
+ description: etcd client certificate for use by Kubernetes apiserver
+ common_name: apiserver
+ # NOTE(mark-burnett): hosts not required for client certificates
+ - document_name: kubernetes-etcd-anchor
+ description: anchor
+ common_name: anchor
+ # NEWSITE-CHANGEME: The following should be a list of the control plane
+ # nodes in the environment, including genesis.
+ # For each node, the `hosts` list should be comprised of:
+ # 1. The node's hostname, as already defined in baremetal/nodes.yaml
+ # 2. The node's oam IP address, as already defined in baremetal/nodes.yaml
+ # 3. The node's Calico IP address, as already defined in baremetal/nodes.yaml
+ # 4. 127.0.0.1
+ # 5. localhost
+ # 6. kubernetes-etcd.kube-system.svc.cluster.local
+ # NOTE: This list also needs to include the Genesis node, which is not
+ # listed in baremetal/nodes.yaml, but by convention should be allocated
+ # the first non-reserved IP in each logical network allocation range
+ # defined in networks/physical/networks.yaml, except for the kubernetes
+ # service_cidr where it should start with the second IP in the range.
+ # NOTE: The genesis node is defined twice with the same `hosts` data:
+ # Once with its hostname in the common/document name, and once with
+ # `genesis` defined instead of the host. For now, this duplicated
+ # genesis definition is required. FIXME: Remove duplicate definition
+ # after Promenade addresses this issue.
+ - document_name: kubernetes-etcd-genesis
+ common_name: kubernetes-etcd-genesis
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+ - document_name: kubernetes-etcd-{{yaml.genesis.name}}
+ common_name: kubernetes-etcd-{{yaml.genesis.name}}
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+{% for server in yaml.masters %}
+ - document_name: kubernetes-etcd-{{ server.name }}
+ common_name: kubernetes-etcd-{{ server.name }}
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+{% endfor %}
+ # End node list
+ kubernetes-etcd-peer:
+ certificates:
+ # NEWSITE-CHANGEME: This list should be identical to the previous list,
+ # except that `-peer` has been appended to the document/common names.
+ - document_name: kubernetes-etcd-genesis-peer
+ common_name: kubernetes-etcd-genesis-peer
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+ - document_name: kubernetes-etcd-{{yaml.genesis.name}}-peer
+ common_name: kubernetes-etcd-{{yaml.genesis.name}}-peer
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+{% for server in yaml.masters %}
+ - document_name: kubernetes-etcd-{{server.name}}-peer
+ common_name: kubernetes-etcd-{{server.name}}-peer
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ - 127.0.0.1
+ - localhost
+ - kubernetes-etcd.kube-system.svc.cluster.local
+ - {{yaml.kubernetes.etcd_service_ip}}
+{% endfor %}
+ # End node list
+ calico-etcd:
+ description: Certificates for Calico etcd client traffic
+ certificates:
+ - document_name: calico-etcd-anchor
+ description: anchor
+ common_name: anchor
+ # NEWSITE-CHANGEME: The following should be a list of the control plane
+ # nodes in the environment, including genesis.
+ # For each node, the `hosts` list should be comprised of:
+ # 1. The node's hostname, as already defined in baremetal/nodes.yaml
+ # 2. The node's oam IP address, as already defined in baremetal/nodes.yaml
+ # 3. The node's Calico IP address, as already defined in baremetal/nodes.yaml
+ # 4. 127.0.0.1
+ # 5. localhost
+ # 6. The calico/etcd/service_ip defined in networks/common-addresses.yaml
+ # NOTE: This list also needs to include the Genesis node, which is not
+ # listed in baremetal/nodes.yaml, but by convention should be allocated
+ # the first non-reserved IP in each logical network allocation range
+ # defined in networks/physical/networks.yaml
+ - document_name: calico-etcd-{{yaml.genesis.name}}
+ common_name: calico-etcd-{{yaml.genesis.name}}
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - 10.96.232.136
+{% for server in yaml.masters %}
+ - document_name: calico-etcd-{{server.name}}
+ common_name: calico-etcd-{{server.name}}
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ - 127.0.0.1
+ - localhost
+ - 10.96.232.136
+{% endfor %}
+ - document_name: calico-node
+ common_name: calcico-node
+ # End node list
+ calico-etcd-peer:
+ description: Certificates for Calico etcd clients
+ certificates:
+ # NEWSITE-CHANGEME: This list should be identical to the previous list,
+ # except that `-peer` has been appended to the document/common names.
+ - document_name: calico-etcd-{{yaml.genesis.name}}-peer
+ common_name: calico-etcd-{{yaml.genesis.name}}-peer
+ hosts:
+ - {{yaml.genesis.name}}
+ - {{yaml.genesis.host}}
+ - {{yaml.genesis.ksn}}
+ - 127.0.0.1
+ - localhost
+ - 10.96.232.136
+{% for server in yaml.masters %}
+ - document_name: calico-etcd-{{server.name}}-peer
+ common_name: calico-etcd-{{server.name}}-peer
+ hosts:
+ - {{server.name}}
+ - {{server.host}}
+ - {{server.ksn}}
+ - 127.0.0.1
+ - localhost
+ - 10.96.232.136
+{% endfor %}
+ - document_name: calico-node-peer
+ common_name: calcico-node-peer
+ # End node list
+ keypairs:
+ - name: service-account
+ description: Service account signing key for use by Kubernetes controller-manager.
+...
--- /dev/null
+---
+# The purpose of this file is to apply proper labels to Genesis node so the
+# proper services are installed and proper configuration applied. This should
+# not need to be changed for a new site.
+# #GLOBAL-CANDIDATE#
+schema: promenade/Genesis/v1
+metadata:
+ schema: metadata/Document/v1
+ name: genesis-site
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: genesis-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ labels:
+ dynamic:
+ - beta.kubernetes.io/fluentd-ds-ready=true
+ - calico-etcd=enabled
+ - ceph-mds=enabled
+ - ceph-mon=enabled
+ - ceph-osd=enabled
+ - ceph-rgw=enabled
+ - ceph-mgr=enabled
+ - ceph-bootstrap=enabled
+ - tenant-ceph-control-plane=enabled
+ - tenant-ceph-mon=enabled
+ - tenant-ceph-rgw=enabled
+ - tenant-ceph-mgr=enabled
+ - kube-dns=enabled
+ - kube-ingress=enabled
+ - kubernetes-apiserver=enabled
+ - kubernetes-controller-manager=enabled
+ - kubernetes-etcd=enabled
+ - kubernetes-scheduler=enabled
+ - promenade-genesis=enabled
+ - ucp-control-plane=enabled
+ - maas-rack=enabled
+ - maas-region=enabled
+ - ceph-osd-bootstrap=enabled
+ - openstack-control-plane=enabled
+ - openvswitch=enabled
+ - openstack-l3-agent=enabled
+ - fluentd=enabled
+ - fluentbit=enabled
+ - node-exporter=enabled
+...
--- /dev/null
+---
+schema: 'drydock/HardwareProfile/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ name: dell_r720
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ # Vendor of the server chassis
+ vendor: {{yaml.hardware.vendor}}
+ # Generation of the chassis model
+ generation: '{{yaml.hardware.generation}}'
+ # Version of the chassis model within its generation - not version of the hardware definition
+ hw_version: '{{yaml.hardware.hw_version}}'
+ # The certified version of the chassis BIOS
+ bios_version: '{{yaml.hardware.bios_version}}'
+ # Mode of the default boot of hardware - bios, uefi
+ boot_mode: bios
+ # Protocol of boot of the hardware - pxe, usb, hdd
+ bootstrap_protocol: pxe
+ # Which interface to use for network booting within the OOB manager, not OS device
+ pxe_interface: 0
+ # Map hardware addresses to aliases/roles to allow a mix of hardware configs
+ # in a site to result in a consistent configuration
+ device_aliases:
+{% if 'hardware' in yaml and 'device_aliases' in yaml.hardware %}
+{% for device in yaml.hardware.device_aliases %}
+ # {{ device.name }}
+ {{ device.key }}:
+ address: '{{ device.address }}'
+ dev_type: '{{ device.dev_type }}'
+ bus_type: '{{ device.bus_type }}'
+{% endfor %}
+{% endif %}
+...
--- /dev/null
+---
+# This is the primary control plane host profile for Airship on DELL R720s,
+# and should not need to be altered if you are using matching HW. The active
+# participants in the Ceph cluster run on this profile. Other control plane
+# services are not affected by primary vs secondary designation.
+schema: drydock/HostProfile/v1
+metadata:
+ schema: metadata/Document/v1
+ name: cp_r720-primary
+ storagePolicy: cleartext
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ hosttype: cp-global
+ actions:
+ - method: replace
+ path: .interfaces
+ - method: replace
+ path: .storage
+ - method: merge
+ path: .
+data:
+ hardware_profile: dell_r720
+
+ primary_network: oam
+ interfaces:
+ pxe:
+ device_link: pxe
+ slaves:
+ - pxe_nic01
+ networks:
+ - pxe
+ bond0:
+ device_link: data
+ slaves:
+ - data_nic01
+ - data_nic02
+ networks:
+ - oam
+ - storage
+ - overlay
+ - calico
+ dpdk:
+ device_link: dpdk
+ slaves:
+ - dpdk_nic01
+ networks:
+ - dpdk
+ storage:
+ physical_devices:
+{% for disk in yaml.disks %}
+ {{disk.name}}:
+ {% if 'labels' in disk %}
+ labels:
+ {% for key, value in disk.labels.items() %}
+ {{key}}: '{{value}}'
+ {% endfor %}
+ {% endif %}
+ partitions:
+ {% for p in disk.partitions %}
+ - name: '{{p.name}}'
+ size: '{{p.size}}'
+ {% if 'bootable' in p %}
+ bootable: {{p.bootable}}
+ {% endif %}
+ filesystem:
+ mountpoint: '{{p.mountpoint}}'
+ fstype: 'ext4'
+ mount_options: 'defaults'
+ {% endfor %}
+{% endfor %}
+
+ platform:
+ kernel: 'hwe-16.04'
+ kernel_params:
+{% if 'platform' in yaml and 'kernel_params' in yaml.platform %}
+{% for key, value in yaml.platform.kernel_params.items() %}
+ {{key}}: '{{value}}'
+{% endfor %}
+{% else %}
+ console: 'ttyS1,115200n8'
+ intel_iommu: 'on'
+ iommu: 'pt'
+ amd_iommu: 'on'
+ transparent_hugepage: 'never'
+{% endif %}
+{% if 'platform' in yaml and 'vcpu_pin_set' in yaml.platform %}
+ isolcpus: '{{yaml.platform.vcpu_pin_set}}'
+{% endif %}
+
+ metadata:
+ owner_data:
+ control-plane: enabled
+ ucp-control-plane: enabled
+ openstack-control-plane: enabled
+ openstack-heat: enabled
+ openstack-keystone: enabled
+ openstack-rabbitmq: enabled
+ openstack-dns-helper: enabled
+ openstack-mariadb: enabled
+ openstack-nova-control: enabled
+ # openstack-etcd: enabled
+ openstack-mistral: enabled
+ openstack-memcached: enabled
+ openstack-glance: enabled
+ openstack-horizon: enabled
+ openstack-cinder-control: enabled
+ openstack-cinder-volume: control
+ openstack-neutron: enabled
+ openvswitch: enabled
+ ucp-barbican: enabled
+ ceph-mon: enabled
+ ceph-mgr: enabled
+ ceph-osd: enabled
+ ceph-mds: enabled
+ ceph-rgw: enabled
+ ucp-maas: enabled
+ kube-dns: enabled
+ tenant-ceph-control-plane: enabled
+ tenant-ceph-mon: enabled
+ tenant-ceph-osd: enabled
+ tenant-ceph-rgw: enabled
+ tenant-ceph-mgr: enabled
+ kubernetes-apiserver: enabled
+ kubernetes-controller-manager: enabled
+ kubernetes-etcd: enabled
+ kubernetes-scheduler: enabled
+ tiller-helm: enabled
+ kube-etcd: enabled
+ calico-policy: enabled
+ calico-node: enabled
+ calico-etcd: enabled
+ ucp-armada: enabled
+ ucp-drydock: enabled
+ ucp-deckhand: enabled
+ ucp-shipyard: enabled
+ IAM: enabled
+ ucp-promenade: enabled
+ prometheus-server: enabled
+ prometheus-client: enabled
+ fluentd: enabled
+ fluentbit: enabled
+ influxdb: enabled
+ kibana: enabled
+ elasticsearch-client: enabled
+ elasticsearch-master: enabled
+ elasticsearch-data: enabled
+ postgresql: enabled
+ kube-ingress: enabled
+ beta.kubernetes.io/fluentd-ds-ready: 'true'
+ node-exporter: enabled
+ openstack-nova-compute: enabled
+ openstack-libvirt: kernel
+ sriov: enabled
+...
+---
+schema: drydock/HostProfile/v1
+metadata:
+ schema: metadata/Document/v1
+ name: cp_r720-secondary
+ storagePolicy: cleartext
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ hosttype: cp-global
+ actions:
+ - method: replace
+ path: .interfaces
+ - method: replace
+ path: .storage
+ - method: replace
+ path: .metadata.owner_data
+ - method: merge
+ path: .
+data:
+ hardware_profile: dell_r720
+
+ primary_network: oam
+ interfaces:
+ pxe:
+ device_link: pxe
+ slaves:
+ - pxe_nic01
+ networks:
+ - pxe
+ bond0:
+ device_link: data
+ slaves:
+ - data_nic01
+ - data_nic02
+ networks:
+ - oam
+ - storage
+ - overlay
+ - calico
+ dpdk:
+ device_link: dpdk
+ slaves:
+ - dpdk_nic01
+ networks:
+ - dpdk
+ storage:
+ physical_devices:
+{% for disk in yaml.disks %}
+ {{disk.name}}:
+ {% if 'labels' in disk %}
+ labels:
+ {% for key, value in disk.labels.items() %}
+ {{key}}: '{{value}}'
+ {% endfor %}
+ {% endif %}
+ partitions:
+ {% for p in disk.partitions %}
+ - name: '{{p.name}}'
+ size: '{{p.size}}'
+ {% if 'bootable' in p %}
+ bootable: {{p.bootable}}
+ {% endif %}
+ filesystem:
+ mountpoint: '{{p.mountpoint}}'
+ fstype: 'ext4'
+ mount_options: 'defaults'
+ {% endfor %}
+{% endfor %}
+
+ platform:
+ kernel: 'hwe-16.04'
+ kernel_params:
+{% if 'platform' in yaml and 'kernel_params' in yaml.platform %}
+{% for key, value in yaml.platform.kernel_params.items() %}
+ {{key}}: '{{value}}'
+{% endfor %}
+{% else %}
+ console: 'ttyS1,115200n8'
+ intel_iommu: 'on'
+ iommu: 'pt'
+ amd_iommu: 'on'
+ transparent_hugepage: 'never'
+{% endif %}
+{% if 'platform' in yaml and 'vcpu_pin_set' in yaml.platform %}
+ isolcpus: '{{yaml.platform.vcpu_pin_set}}'
+{% endif %}
+
+ metadata:
+ owner_data:
+ openstack-nova-compute: enabled
+ openvswitch: enabled
+ tenant-ceph-osd: enabled
+ openstack-libvirt: kernel
+ sriov: enabled
+ beta.kubernetes.io/fluentd-ds-ready: 'true'
+...
--- /dev/null
+---
+# This is the data plane host profile for Airship on DELL R720s, and should
+# not need to be altered if you are using matching HW. The host profile is set
+# up for CPU isolation (for nova pinning), hugepages, and SR-IOV.
+schema: drydock/HostProfile/v1
+metadata:
+ schema: metadata/Document/v1
+ name: dp_r720
+ storagePolicy: cleartext
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ hosttype: dp-global
+ actions:
+ - method: replace
+ path: .interfaces
+ - method: replace
+ path: .storage
+ - method: merge
+ path: .
+data:
+ hardware_profile: dell_r720
+
+ primary_network: oam
+ interfaces:
+ pxe:
+ device_link: pxe
+ slaves:
+ - pxe_nic01
+ networks:
+ - pxe
+ bond0:
+ device_link: data
+ slaves:
+ - data_nic01
+ - data_nic02
+ networks:
+ - oam
+ - storage
+ - overlay
+ - calico
+ dpdk:
+ device_link: dpdk
+ slaves:
+ - dpdk_nic01
+ networks:
+ - dpdk
+ storage:
+ physical_devices:
+{% for disk in yaml.disks %}
+ {{disk.name}}:
+ {% if 'labels' in disk %}
+ labels:
+ {% for key, value in disk.labels.items() %}
+ {{key}}: '{{value}}'
+ {% endfor %}
+ {% endif %}
+ partitions:
+ {% for p in disk.partitions %}
+ - name: '{{p.name}}'
+ size: '{{p.size}}'
+ {% if 'bootable' in p %}
+ bootable: {{p.bootable}}
+ {% endif %}
+ filesystem:
+ mountpoint: '{{p.mountpoint}}'
+ fstype: 'ext4'
+ mount_options: 'defaults'
+ {% endfor %}
+{% endfor %}
+
+ platform:
+ kernel: 'hwe-16.04'
+ kernel_params:
+{% if 'platform' in yaml and 'kernel_params' in yaml.platform %}
+{% for key, value in yaml.platform.kernel_params.items() %}
+ {{key}}: '{{value}}'
+{% endfor %}
+{% else %}
+ console: 'ttyS1,115200n8'
+ intel_iommu: 'on'
+ iommu: 'pt'
+ amd_iommu: 'on'
+ transparent_hugepage: 'never'
+{% endif %}
+{% if 'platform' in yaml and 'vcpu_pin_set' in yaml.platform %}
+ isolcpus: '{{yaml.platform.vcpu_pin_set}}'
+{% endif %}
+...
--- /dev/null
+---
+# The purpose of this file is to define the drydock Region, which in turn drives
+# the MaaS region.
+schema: 'drydock/Region/v1'
+metadata:
+ schema: 'metadata/Document/v1'
+ # NEWSITE-CHANGEXX: Replace with the site name
+ name: {{yaml.site_name}}
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ # NEWSITE-CHANGEXX: Substitutions from deckhand SSH public keys into the
+ # list of authorized keys which MaaS will register for the built-in "ubuntu"
+ # account during the PXE process. Create a substitution rule for each SSH
+ # key that should have access to the "ubuntu" account (useful for trouble-
+ # shooting problems before UAM or UAM-lite is operational). SSH keys are
+ # stored as secrets in site/seaworthy/secrets.
+ - dest:
+ # Add/replace the first item in the list
+ path: .authorized_keys[0]
+ src:
+ schema: deckhand/PublicKey/v1
+ # This should match the "name" metadata of the SSH key which will be
+ # substituted, located in site/seaworthy/secrets folder.
+ name: airship_ssh_public_key
+ path: .
+ - dest:
+ path: .repositories.main_archive
+ src:
+ schema: pegleg/SoftwareVersions/v1
+ name: software-versions
+ path: .packages.repositories.main_archive
+ # Second key example
+ - dest:
+ # Increment the list index
+ path: .authorized_keys[1]
+ src:
+ schema: deckhand/PublicKey/v1
+ # your ssh key
+ name: localadmin_ssh_public_key
+ path: .
+data:
+ tag_definitions: []
+ # This is the list of SSH keys which MaaS will register for the built-in
+ # "ubuntu" account during the PXE process. This list is populated by
+ # substitution, so the same SSH keys do not need to be repeated in multiple
+ # manifests.
+ authorized_keys: []
+ repositories:
+ remove_unlisted: true
+...
--- /dev/null
+---
+# Example manifest for ingress cert.
+# NEWSITE-CHANGEME: must be replaced with proper/valid set,
+# self-signed certs are not supported.
+metadata:
+ layeringDefinition:
+ abstract: false
+ layer: site
+ name: ingress-crt
+ schema: metadata/Document/v1
+ labels:
+ name: ingress-crt-site
+ storagePolicy: cleartext
+schema: deckhand/Certificate/v1
+data: |
+ -----BEGIN CERTIFICATE-----
+ MIIFKzCCA5OgAwIBAgIMW2h6FCcFdKeaw3vnMA0GCSqGSIb3DQEBCwUAMBIxEDAO
+ BgNVBAMTB0FpcnNoaXAwHhcNMTgwODA2MTY0MDUyWhcNMTkwODA2MTY0MDUyWjBJ
+ MTUwMwYDVQQDEyxpbmdyZXNzLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3Vu
+ ZHJ5LmNvbTEQMA4GA1UEChMHQWlyc2hpcDCCAaIwDQYJKoZIhvcNAQEBBQADggGP
+ ADCCAYoCggGBALvNHm/G/ylh6aPcvrhOcb4qz1BjcNtnxH8bzZng/rMeX3W2AzjC
+ r2JloJcDvOLBp/TkLOZPImnFW2/GCwktxPgXZuBTPzFV50g77KsPFw0fn3Si7+bs
+ F22tLhdOGk6MQj/WW4pKGHqdw1/VbPwOHBT+I4/scR1L2SZxYtSFIKGenHJH+PMV
+ bCdwnNOR80F8KRzK5iZs/r6S/QqVheieARSWWnk2+TtkM1BloGOhLSd+ZkWh9VO1
+ eOnZowkaDAJwD/G6zoSr5n+beaXzDnEcoVXFSwd4FLoV+om77o92XmZ4rVw0vTMO
+ k6jVwmkdT+dM2K2hLUG/TXWoV2/Qms70gzDOs85RtAkTPe4Ohtdpr51Q0hd35TKG
+ YLKzX/OPblD68iYJYSBvMPpAVTbFYVPW1AQx8wWfannYbMoeL8XTEOKfkqm90YP9
+ EhIdtmw4D7GZxlzG5FXXutmT9sqLfqlRu/RynAhBP8NQvw74WumhOe8r7GhCwgzC
+ gaPLGjeekoS6LQIDAQABo4IBSDCCAUQwDAYDVR0TAQH/BAIwADCBzQYDVR0RBIHF
+ MIHCgixpbmdyZXNzLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3VuZHJ5LmNv
+ bYIta2V5c3RvbmUuYWlyc2hpcC1zZWF3b3J0aHkuYXRsYW50YWZvdW5kcnkuY29t
+ gilub3ZhLmFpcnNoaXAtc2Vhd29ydGh5LmF0bGFudGFmb3VuZHJ5LmNvbYIsaG9y
+ aXpvbi5haXJzaGlwLXNlYXdvcnRoeS5hdGxhbnRhZm91bmRyeS5jb22HBAoXFQuH
+ BAoXFgswEwYDVR0lBAwwCgYIKwYBBQUHAwEwDwYDVR0PAQH/BAUDAwegADAdBgNV
+ HQ4EFgQUfTAjNgn/1U1Uh1MJDYT2m4dzhsYwHwYDVR0jBBgwFoAUJFuXPZo6RzfE
+ BlJjnnk5jhcP4wIwDQYJKoZIhvcNAQELBQADggGBAE2ISWmrxqrledJI3aLaS9Yw
+ WsZc8O8CnIyLoxrE85vUubFjuI9ixC/6dJxl2iB1n0H8JgmFREox32Q4+kDJI8V/
+ X9x0PFpRzL7QEPrLZhW94Yis3sOphLW0rf0t06ZepdHHeodYJu1pVMDmLq6bKXdX
+ vo+/WwKnZBXC1qPbXJByv/CN9MtViXOnBGORFRTJPb6U8379LNWclJ/LW12yTwNk
+ JGIbZU61Vxu+2nLIabmmRoODH2jomgMOMMzLgjT3Hvw3whe8GrUoxDiPYQVTDGNm
+ ly6m+5B1Nx06fkZazonozeaOhSQ7RblUSbo+w8TJmLRzD9ft7p4vpjBGxRADMcuF
+ DOjATgdZeisBUHTGEO0P6wJOBQuCFMX9AVl+u8ZpcuRaRaN+pBE6/BqcHBB6qV/N
+ w2DdNtP8BrJ3kJVNEDIo5oTbH5SToxgA4hWBV42M1rB+5vIMDKN3rwVDdNKWYhYc
+ VZpU3V9V6JzSW1O2w4Wu9PdbWJD9oSvC0qJgnjOXzg==
+ -----END CERTIFICATE-----
+...
+---
+metadata:
+ layeringDefinition:
+ abstract: false
+ layer: site
+ name: ingress-ca
+ schema: metadata/Document/v1
+ labels:
+ name: ingress-ca-site
+ storagePolicy: cleartext
+schema: deckhand/CertificateAuthority/v1
+data: |
+ -----BEGIN CERTIFICATE-----
+ MIID7TCCAlWgAwIBAgIMW2h3tgSwie0Ypx8eMA0GCSqGSIb3DQEBCwUAMBIxEDAO
+ BgNVBAMTB0FpcnNoaXAwHhcNMTgwODA2MTYzMDQ2WhcNMTkwODA2MTYzMDQ2WjAS
+ MRAwDgYDVQQDEwdBaXJzaGlwMIIBojANBgkqhkiG9w0BAQEFAAOCAY8AMIIBigKC
+ AYEAny0Nqu9U2tXdCCTNzD2T62htMmBLg3CmzWajfbfFl7ALqzo3HgbbY3PxTHDE
+ OJ/lwdm0HkEaGfEDXhJd06WZsa8+fKGqhKXvZXwXx5mJ8LCGxz6xiaxwo9lnKe6V
+ o3YX7bJ5YIVxQ2jhvZo+dY8Z/buloi2Tp2HbqTejKULH9+qdiQTDXAnyR0NLqzJ0
+ YQ4v4yU3zix3nBi8z29lQekGO9quNEka3nw2n0Gxmq5z1bNALGCF5F759mVkB0uT
+ fPGF+zm9eqlqAgduYg7R+JYUumVHvIoRY454GtAdZHTJHJZP0gQSGJsLff8ROFpI
+ GVYsOZhJXU9Ihc5VBC5PMErbmCn0YkuxAWNOYBstZ8l+uY6YiPoFV5Ulc/8M0If+
+ T6jbqzWoFC+4ysgY95RKOw53S4o/T6AFwiIKIw0xp3UfHCf6kr5Y0+XdDn5CXpJB
+ d1KK3PoUWzPSsxcUMXvgKWT4x1vsCId21dn1SmVSOEBhM08VZfjd5bvL9Xjt/E0j
+ mUqDAgMBAAGjQzBBMA8GA1UdEwEB/wQFMAMBAf8wDwYDVR0PAQH/BAUDAwcEADAd
+ BgNVHQ4EFgQUJFuXPZo6RzfEBlJjnnk5jhcP4wIwDQYJKoZIhvcNAQELBQADggGB
+ AJaoEtnDoWUUs4nSSqIGcoCfpIO0oqVp8DvkBOcxz5Rz8vMVJSC24/UnuCD2Wknx
+ 2V/E3edXIeRo7duhPtNCT7c8OKY/pJsZQTgOczn4rphoD1pmAIPZmpG6ssPadPiM
+ EP8xWJHZt8NXG7D5kJX2COvBvgNeWXL6MF7Tv8+t5xzt59Vitdb/7lm9Z6jjpvN+
+ zoG0pKx3XYESsnLAVAf00F+kWwds/3x3gQywUAQUDER0jliYUE5id+sojp357Cl9
+ XtY+8zSnTduuP8CfMhwv5p6j9xbqacfT7AzpQ6cy4xcQ7MA6JBQcxbaq4NtvIf6+
+ d/5N9d8LGnfXdCd9iwNy9Qk23Ea0SNhnk9F/NqGBPakU4TbHh4iTYMC/+hDGInpO
+ TIRelTidNBFNaIBg3Z0vsh0lDwbt/xhpXip+ZVBqKMTtktEceiVGru9cYUQA2tKI
+ XNoc5s0uQGMpdFzgED4lXZf+n7yGVMKohvi7Yn96HqujGIrVH6qThsI6m7pUSz40
+ +g==
+ -----END CERTIFICATE-----
+...
+---
+metadata:
+ layeringDefinition:
+ abstract: false
+ layer: site
+ name: ingress-key
+ schema: metadata/Document/v1
+ labels:
+ name: ingress-key-site
+ storagePolicy: cleartext
+schema: deckhand/CertificateKey/v1
+data: |
+ -----BEGIN RSA PRIVATE KEY-----
+ MIIG4wIBAAKCAYEAu80eb8b/KWHpo9y+uE5xvirPUGNw22fEfxvNmeD+sx5fdbYD
+ OMKvYmWglwO84sGn9OQs5k8iacVbb8YLCS3E+Bdm4FM/MVXnSDvsqw8XDR+fdKLv
+ 5uwXba0uF04aToxCP9ZbikoYep3DX9Vs/A4cFP4jj+xxHUvZJnFi1IUgoZ6cckf4
+ 8xVsJ3Cc05HzQXwpHMrmJmz+vpL9CpWF6J4BFJZaeTb5O2QzUGWgY6EtJ35mRaH1
+ U7V46dmjCRoMAnAP8brOhKvmf5t5pfMOcRyhVcVLB3gUuhX6ibvuj3ZeZnitXDS9
+ Mw6TqNXCaR1P50zYraEtQb9NdahXb9CazvSDMM6zzlG0CRM97g6G12mvnVDSF3fl
+ MoZgsrNf849uUPryJglhIG8w+kBVNsVhU9bUBDHzBZ9qedhsyh4vxdMQ4p+Sqb3R
+ g/0SEh22bDgPsZnGXMbkVde62ZP2yot+qVG79HKcCEE/w1C/Dvha6aE57yvsaELC
+ DMKBo8saN56ShLotAgMBAAECggGAYzZDhA1+sx/0zApL/xYB5NK83t0Ju/8fwX6w
+ qUBBjeLXz1mubgf7m2HQ6ragzLI9xpPcXHcl2PbYDT50ig7R5baHNK8FzUxyeKif
+ qOa56Mbx+C4zyqyi2+AHX2x1XVWfkhXuGip2sCA0HKalgqr5juWLZ/ci8rUlLLft
+ 3BPQX1FpmL4I+HIyxsspLmQGPGwZVAqkd1xRX+BLKZJAQdlm/LdJaIvwMr4Glcx6
+ ZOe68QhHgzXCYsyV6gR9qstF2OvVuLa2mUc7EzYInFIFhXUdAAwmDqkuuLRdRQhf
+ Ur8nqQW33T0cG0GBUzgBI5YmSPJvTSzcPmeSyNVx2/Yb0pkuXtCw67oDcAsN4nW8
+ uls49E2RaiLJYsy5vPsX5aJNcAxw/CWLdadQ3ukviD/MDJbpTl4F52GOVYL6K4XH
+ g5TJjj7xzjmK3ldR/Kscg7HpCitQLGUYdgIsAFdspXf4aSIa68IjDrc5NsJZuMzc
+ PbVHrw7QYNfHY7VNdUlOVqH5lS3BAoHBANRqKrQXtnJmM006TCEJXdcN/5M685jz
+ +L4Ox0Rhrq8ROgcN5q/hjKb6kP/MccQ9voGQOl9TKEyinGNdTtyc/fuH7RNlQwpS
+ HT+vEzVEcrSe8UFs8c6oJnHFO72ylFcibFf56LvbI3L8BZXp7gPSPQkp5f1NWEZk
+ X5bUL4UNiOm0diltba/ofxywF0M9WGD00eqi0Q29JRlvun+355j06CENxRoonNZC
+ wk1evIxhhckP9zLjI2Ykb1hV6yzwPWtmyQKBwQDiVgru/B396KhzDhLl5AL+pBWA
+ GsfiCbmPLh6W6V5VzldB4+GlMRrJ4zSjZQ3/nvX5KepqjMn1N6LQpZQUI/YShCKE
+ mW0XMiAfbp2d23MRMjLD8L/bIoBHQOPkCaMjbmyDOlCagWakEvHJO/TieVgTmYk6
+ mtEYVjJFWI9OCNMAHdl8ovWr3p+8YbVZ8LLv5ZO/V1cIjczoNQ6p8LG/pPMTDLXM
+ ScN9a8z3f8LQLBHBlu0155xvt95PQLAon/x21kUCgcAvPVk36hoiQQZhw3hQ1JNx
+ E2TmanLobkHAiurYE11VA+DC1t2Z+fBc5la+/MnEWfL3P4srzgOlX3imRIcYWzXE
+ 7crUyG1ray2kDxyXeRyFfN+srDzut8is/q81lfSVmEs+GY8f0DGHDfN0Dq1nXidC
+ 1XWXqs7aANKdaZ0T2xm61+57ciG1wGAckjDqPEdecLQKmaEijBEnIgj5BH5WLwk8
+ 6KIQGj4fDIPHzyzhj4LAX3ObdpZVzf6RR7JgsSEHtLkCgcBROW2dDC87MqZY++D+
+ TVBhz8LDgVjgHntQDc3+fGtVQcKAq+YLYU7qyrXWOWrHpGVDcK5mZHYJoVi1peY5
+ QBqL1I2KpoDGxT9P6GN6BgoKTsh3FsvTOVNtvrTJ3keEbJlWkrPgbrXGBeJtRC4C
+ pGdeSUg9FtgY8r4BsuFisLoAHbYyC008y5zpfusVBtNAUlQuY4qhUDoLzxafF/jB
+ /NEasgH/+SzFss0QuPHRwS7yGVaxdJfoY8TNDjrpqVhx0T0CgcEAvKG4UoWvT8gJ
+ pIeeAxxnv9yrMxgpntu4RXPDHgfX5tva6EaM3r3nLXjd9FVtlQ4cNBMhp9HNhS3a
+ dK+oEDcBysVxxfltlS2Bx0+gQf3WxgBCJwayKe3i/XCDza92EENgxTPmqB1LHiq5
+ 2b5aOl2Y5fP0eX6UryxRc443c/ejMHw4lGwnno0qpRk9M9Ucqv5J96QCfAlBSQQS
+ gOG9cypL0kBWzCejn9W4av8HkM8Noqd7Tqul1onv/46OBaX51kt3
+ -----END RSA PRIVATE KEY-----
+...
--- /dev/null
+---
+schema: deckhand/Passphrase/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ipmi_admin_password
+ layeringDefinition:
+ abstract: false
+ layer: site
+ labels:
+ name: ipmi-admin-password-site
+ storagePolicy: cleartext
+data: {{yaml.ipmi_admin.password}}
+...
--- /dev/null
+---
+schema: deckhand/Passphrase/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ubuntu_crypt_password
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+# Pass: password123
+data: {{yaml.genesis.root_password | crypt_sha512}}
+...
--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+
+schema: deckhand/PublicKey/v1
+metadata:
+ schema: metadata/Document/v1
+ name: localadmin_ssh_public_key
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data: {{yaml.genesis_ssh_public_key}}
+...
--- /dev/null
+---
+# High-level pegleg site definition file
+schema: pegleg/SiteDefinition/v1
+metadata:
+ schema: metadata/Document/v1
+ layeringDefinition:
+ abstract: false
+ layer: site
+ # NEWSITE-CHANGEME: Replace with the site name
+ name: {{yaml.site_name}}
+ storagePolicy: cleartext
+data:
+ # The type layer this site will deploy with. Type layer is found in the
+ # type folder.
+ site_type: foundry
+...
--- /dev/null
+---
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: kubernetes-calico
+ replacement: true
+ #labels:
+ # name: kubernetes-calico-global
+ # component: kubernetes-calico
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: kubernetes-calico-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .calico.bgp.ipv4.public_service_cidr
+ dest:
+ path: .values.networking.bgp.ipv4.additional_cidrs[0]
+
+data:
+ values:
+ networking:
+ mtu: 1500
+ settings:
+{% if ('peers' in yaml.networks.ksn and yaml.networks.ksn.peers is not none and yaml.networks.ksn.peers is iterable ) %}
+ mesh: "off"
+ ippool:
+ ipip:
+ enabled: "false"
+{% else %}
+ mesh: "on"
+ ippool:
+ ipip:
+ enabled: "true"
+ mode: "Always"
+ nat_outgoing: "true"
+ disabled: "false"
+{% endif %}
+ bgp:
+ asnumber: {{yaml.networks.ksn.local_asnumber}}
+ ipv4:
+ additional_cidrs:
+{% for add_cidr in yaml.networks.ksn.additional_cidrs %}
+ - {{add_cidr}}
+{% endfor %}
+{% if ('peers' in yaml.networks.ksn and yaml.networks.ksn.peers is not none and yaml.networks.ksn.peers is iterable ) %}
+ peers:
+{% for peer in yaml.networks.ksn.peers %}
+ - apiVersion: projectcalico.org/v3
+ kind: BGPPeer
+ metadata:
+ name: peer-{{loop.index-1}}
+ spec:
+ peerIP: {{peer.ip}}
+ asnumber: {{peer.asnumber}}
+{% endfor %}
+{% endif %}
+...
--- /dev/null
+---
+# The purpose of this file is to build the list of calico etcd nodes and the
+# calico etcd certs for those nodes in the environment.
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: kubernetes-calico-etcd
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: kubernetes-calico-etcd-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+ substitutions:
+ # Generate a list of control plane nodes (i.e. genesis node + master node
+ # list) on which calico etcd will run and will need certs. It is assumed
+ # that Airship sites will have 4 control plane nodes, so this should not need to
+ # change for a new site.
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .genesis.hostname
+ dest:
+ path: .values.nodes[0].name
+{% for server in yaml.masters %}
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .masters[{{loop.index-1}}].hostname
+ dest:
+ path: .values.nodes[{{loop.index}}].name
+{% endfor %}
+
+ # Certificate substitutions for the node names assembled on the above list.
+ # NEWSITE-CHANGEME: Per above, the number of substitutions should not need
+ # to change with a standard Airship deployment. However, the names of each
+ # deckhand certificate should be updated with the correct hostnames for your
+ # environment. The ordering is important (Genesis is index 0, then master
+ # nodes in the order they are specified in common-addresses).
+
+ # Genesis node {{yaml.genesis.name}}
+ - src:
+ schema: deckhand/Certificate/v1
+ name: calico-etcd-{{yaml.genesis.name}}
+ path: .
+ dest:
+ path: .values.nodes[0].tls.client.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: calico-etcd-{{yaml.genesis.name}}
+ path: .
+ dest:
+ path: .values.nodes[0].tls.client.key
+ - src:
+ schema: deckhand/Certificate/v1
+ name: calico-etcd-{{yaml.genesis.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[0].tls.peer.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: calico-etcd-{{yaml.genesis.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[0].tls.peer.key
+{% for server in yaml.masters %}
+
+ # Master node {{server.name}}
+ - src:
+ schema: deckhand/Certificate/v1
+ name: calico-etcd-{{server.name}}
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.client.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: calico-etcd-{{server.name}}
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.client.key
+ - src:
+ schema: deckhand/Certificate/v1
+ name: calico-etcd-{{server.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.peer.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: calico-etcd-{{server.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.peer.key
+{% endfor %}
+
+data: {}
+...
--- /dev/null
+---
+# The purpose of this file is to build the list of k8s etcd nodes and the
+# k8s etcd certs for those nodes in the environment.
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: kubernetes-etcd
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: kubernetes-etcd-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+ substitutions:
+ # Generate a list of control plane nodes (i.e. genesis node + master node
+ # list) on which k8s etcd will run and will need certs. It is assumed
+ # that Airship sites will have 4 control plane nodes, so this should not need to
+ # change for a new site.
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .genesis.hostname
+ dest:
+ path: .values.nodes[0].name
+{% for server in yaml.masters %}
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .masters[{{loop.index-1}}].hostname
+ dest:
+ path: .values.nodes[{{loop.index}}].name
+{% endfor %}
+
+ # Certificate substitutions for the node names assembled on the above list.
+ # NEWSITE-CHANGEME: Per above, the number of substitutions should not need
+ # to change with a standard Airship deployment. However, the names of each
+ # deckhand certificate should be updated with the correct hostnames for your
+ # environment. The ordering is important (Genesis is index 0, then master
+ # nodes in the order they are specified in common-addresses).
+
+ # Genesis Exception*
+ # *NOTE: This is an exception in that `genesis` is not the hostname of the
+ # genesis node, but `genesis` is referenced here in the certificate names
+ # because of certain Promenade assumptions that may be addressed in the
+ # future. Therefore `genesis` is used instead of `cab23-r720-11` here.
+
+ # Genesis node {{yaml.genesis.name}}
+ - src:
+ schema: deckhand/Certificate/v1
+ name: kubernetes-etcd-genesis
+ path: .
+ dest:
+ path: .values.nodes[0].tls.client.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: kubernetes-etcd-genesis
+ path: .
+ dest:
+ path: .values.nodes[0].tls.client.key
+ - src:
+ schema: deckhand/Certificate/v1
+ name: kubernetes-etcd-genesis-peer
+ path: .
+ dest:
+ path: .values.nodes[0].tls.peer.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: kubernetes-etcd-genesis-peer
+ path: .
+ dest:
+ path: .values.nodes[0].tls.peer.key
+{% for server in yaml.masters %}
+
+ # Master node {{loop.index}} hostname - {{server.name}}
+ - src:
+ schema: deckhand/Certificate/v1
+ name: kubernetes-etcd-{{server.name}}
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.client.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: kubernetes-etcd-{{server.name}}
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.client.key
+ - src:
+ schema: deckhand/Certificate/v1
+ name: kubernetes-etcd-{{server.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.peer.cert
+ - src:
+ schema: deckhand/CertificateKey/v1
+ name: kubernetes-etcd-{{server.name}}-peer
+ path: .
+ dest:
+ path: .values.nodes[{{loop.index}}].tls.peer.key
+{% endfor %}
+
+data: {}
+...
--- /dev/null
+---
+# The purpose of this file is to define the environment-specific public-facing
+# VIP for the ingress controller
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ingress-kube-system
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ ingress: kube-system
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .vip.ingress_vip
+ dest:
+ path: .values.network.vip.addr
+data:
+ values:
+ network:
+ ingress:
+ disable-ipv6: "true"
+ vip:
+ manage: true
+...
--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+# This file defines hardware-specific settings for neutron. If you use the same
+# hardware profile as this environment, you should not need to change this file.
+# Otherwise, you should review the settings here and adjust for your hardware.
+# In particular:
+# 1. logical network interface names
+# 2. physical device mappings
+# TODO: Should move to global layer and become tied to the hardware profile
+
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: neutron
+ replacement: true
+ labels:
+ component: neutron
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: neutron-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+
+data:
+ values:
+ network:
+ backend:
+ - openvswitch
+ interface:
+ tunnel: br-phy
+ conf:
+ plugins:
+ ml2_conf:
+ ml2:
+ mechanism_drivers: openvswitch,l2population
+ type_drivers: flat,vlan,vxlan
+ tenant_network_types: vxlan
+ ml2_type_vlan:
+ network_vlan_ranges: "external:3002:3008"
+ ml2_type_flat:
+ flat_networks: public
+ openvswitch_agent:
+ agent:
+ tunnel_types: vxlan
+ ovs:
+ bridge_mappings: external:br-phy
+ ovs_dpdk:
+ enabled: true
+ driver: vfio-pci
+ nics:
+{% for nic in yaml.dpdk.nics %}
+ - name: {{ nic.name }}
+ pci_id: '{{ nic.pci_id }}'
+ bridge: {{ nic.bridge }}
+ migrate_ip: {{ nic.migrate_ip }}
+{% endfor %}
+ bridges:
+ - name: br-phy
+ bonds: []
+ paste:
+ app:neutronversions:
+ paste.app_factory: neutron.pecan_wsgi.app:versions_factory
+ dependencies:
+ - openstack-neutron-helm-toolkit
+---
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: openstack-neutron-helm-toolkit
+ layeringDefinition:
+ abstract: false
+ layer: global
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/SoftwareVersions/v1
+ name: software-versions
+ path: .charts.osh.neutron-htk
+ dest:
+ path: .source
+data:
+ chart_name: openstack-neutron-helm-toolkit
+ release: openstack-neutron-helm-toolkit
+ namespace: helm-toolkit
+ values: {}
+ dependencies: []
+...
--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+# This file defines hardware-specific settings for nova. If you use the same
+# hardware profile as this environment, you should not need to change this file.
+# Otherwise, you should review the settings here and adjust for your hardware.
+# In particular:
+# 1. vcpu_pin_set will change if the number of logical CPUs on the hardware
+# changes.
+# 2. pci alias / passthrough_whitelist could change if the NIC type or NIC
+# slotting changes.
+# TODO: Should move to global layer and become tied to the hardware profile
+
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: nova
+ labels:
+ component: nova
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: nova-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ network:
+ backend:
+ - openvswitch
+ conf:
+ nova:
+ filter_scheduler:
+ enabled_filters: "RetryFilter, AvailabilityZoneFilter, RamFilter, ComputeFilter, ComputeCapabilitiesFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter, ServerGroupAffinityFilter, PciPassthroughFilter, NUMATopologyFilter, DifferentHostFilter, SameHostFilter"
+ libvirt:
+ virt_type: kvm
+ DEFAULT:
+ vcpu_pin_set: {% if 'platform' in yaml and 'vcpu_pin_set' in yaml.platform %}"{{yaml.platform.vcpu_pin_set}}"
+{% else %}"4-21,26-43,48-65,72-87"
+{% endif %}
+ vif_plugging_is_fatal: False
+ vif_plugging_timeout: 30
+ pci:
+{% if 'gpu' in yaml or 'sriov' in yaml %}
+ alias: |
+ {% if 'sriov' in yaml and 'alias' in yaml.sriov %}
+ {% for alias in yaml.sriov.alias %}
+ '{"name": "{{alias.name}}", "vendor_id": "{{alias.vendor_id}}", "product_id": "{{alias.product_id}}", "capability_type": "pci", "device_type": "type-PCI", "numa_policy": "required"}'
+ {% endfor %}
+ {% endif %}
+ {% if 'gpu' in yaml and 'alias' in yaml.gpu %}
+ {% for alias in yaml.gpu.alias %}
+ '{"name":"{{alias.name}}", "vendor_id":"{{alias.vendor_id}}", "product_id":"{{alias.product_id}}", "device_type":"type-PCI"}'
+ {% endfor %}
+ {% endif %}
+ passthrough_whitelist: |
+ [
+ {%- if 'sriov' in yaml and 'nets' in yaml.sriov %}
+ {% for sriovnet in yaml.sriov.nets %}
+ {%- for vf in sriovnet.whitelists -%}{"address":"{{vf["address"]}}","physical_network":"{{sriovnet.physical}}"}{{',' if not loop.last else ''}}{% endfor %}{{',' if not loop.last else '' -}}
+ {%- endfor -%}
+ {%- if 'gpu' in yaml and 'sriov' in yaml %},{% endif -%}
+ {%- for alias in yaml.gpu.alias %}{"vendor_id": "{{alias.vendor_id}}", "product_id": "{{alias.product_id}}"}{{',' if not loop.last else ''}}{% endfor -%}
+ ]
+ {% endif %}
+{% endif %}
+...
--- /dev/null
+---
+##############################################################################
+# Copyright (c) 2018 AT&T Intellectual Property. All rights reserved. #
+# #
+# Licensed under the Apache License, Version 2.0 (the "License"); you may #
+# not use this file except in compliance with the License. #
+# #
+# You may obtain a copy of the License at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+# #
+# Unless required by applicable law or agreed to in writing, software #
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
+# See the License for the specific language governing permissions and #
+# limitations under the License. #
+##############################################################################
+
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: openvswitch
+ replacement: true
+ labels:
+ name: openvswitch-global
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: openvswitch-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+
+data:
+ values:
+ pod:
+ resources:
+ enabled: true
+ ovs:
+ vswitchd:
+ requests:
+ memory: "8Gi"
+ cpu: "4000m"
+ limits:
+ memory: "8Gi"
+ cpu: "4000m"
+ hugepages-1Gi: "8Gi"
+ conf:
+ ovs_dpdk:
+ enabled: true
+ hugepages_mountpath: /dev/hugepages_1G
+ socket_memory: '4096,4096'
+ pmd_cpu_mask: '0xF'
+ vhostuser_socket_dir: vhostuser
+ dependencies:
+ - openstack-openvswitch-helm-toolkit
+...
+---
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: openstack-openvswitch-helm-toolkit
+ layeringDefinition:
+ abstract: false
+ layer: global
+ storagePolicy: cleartext
+ substitutions:
+ # Chart source
+ - src:
+ schema: pegleg/SoftwareVersions/v1
+ name: software-versions
+ path: .charts.osh.openvswitch-htk
+ dest:
+ path: .source
+
+data:
+ chart_name: openstack-openvswitch-helm-toolkit
+ release: openstack-openvswitch-helm-toolkit
+ namespace: helm-toolkit
+ values: {}
+ dependencies: []
+...
--- /dev/null
+---
+# The purpose of this file is to define environment-specific parameters for the
+# ceph client
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: tenant-ceph-client
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: tenant-ceph-client-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ conf:
+ pool:
+ target:
+ osd: {{yaml.tenant_storage.osd_count}}
+...
--- /dev/null
+---
+# The purpose of this file is to define environment-specific parameters for
+# ceph-osd
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: tenant-ceph-osd
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: tenant-ceph-osd-global
+ actions:
+ - method: replace
+ path: .values.conf.storage.osd
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ labels:
+ osd:
+ node_selector_key: tenant-ceph-osd
+ node_selector_value: enabled
+ conf:
+ storage:
+ failure_domain: "rack"
+ # NEWSITE-CHANGEME: The OSD count and configuration here should not need
+ # to change if your HW matches the HW used in this environment.
+ # Otherwise you may need to add or subtract disks to this list.
+ # no need to create below journal partitions as ceph charts will create them
+ # default size of journal partitions is 10GB
+ osd:
+{% for osd in yaml.tenant_storage.osds %}
+ - data:
+ type: block-logical
+ location: {{osd.data}}
+ journal:
+ type: block-logical
+ location: {{osd.journal}}
+{% endfor %}
+ overrides:
+ ceph_osd:
+ hosts:
+ - name: {{yaml.genesis.name}}
+ conf:
+ storage:
+ failure_domain_name: "{{yaml.genesis.name}}_rack"
+{% for server in yaml.masters %}
+ - name: {{server.name}}
+ conf:
+ storage:
+ failure_domain_name: "{{server.name}}_rack"
+{% endfor %}
+{% if 'workers' in yaml %}{% for server in yaml.workers %}
+ - name: {{server.name}}
+ conf:
+ storage:
+ failure_domain_name: "{{server.name}}_rack"
+{% endfor %}{% endif %}
+...
--- /dev/null
+---
+# The purpose of this file is to define environment-specific parameters for ceph
+# client update
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp-ceph-client-update
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: ucp-ceph-client-update-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ conf:
+ pool:
+ target:
+ # NEWSITE-CHANGEME: Total number of OSDs. Does not need to change if
+ # your HW matches this site's HW. Verify for your environment.
+ # 8 OSDs per node x 3 nodes = 24
+ osd: {{yaml.storage.total_osd_count}}
+...
--- /dev/null
+---
+# The purpose of this file is to define environment-specific parameters for the
+# ceph client
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp-ceph-client
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: ucp-ceph-client-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ conf:
+ pool:
+ target:
+ # NEWSITE-CHANGEME: The number of OSDs per ceph node. Does not need to
+ # change if your deployment HW matches this site's HW.
+ osd: {{yaml.storage.osd_count}}
+...
--- /dev/null
+---
+# The purpose of this file is to define environment-specific parameters for
+# ceph-osd
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp-ceph-osd
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: ucp-ceph-osd-global
+ actions:
+ - method: replace
+ path: .values.conf.storage.osd
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ conf:
+ storage:
+ failure_domain: "rack"
+ # NEWSITE-CHANGEME: The OSD count and configuration here should not need
+ # to change if your HW matches the HW used in this environment.
+ # Otherwise you may need to add or subtract disks to this list.
+ # no need to create below journal partitions as ceph charts will create them
+ # default size of journal partitions is 10GB
+ osd:
+{% for osd in yaml.storage.osds %}
+ - data:
+ type: block-logical
+ location: {{osd.data}}
+ journal:
+ type: block-logical
+ location: {{osd.journal}}
+{% endfor %}
+ overrides:
+ ceph_osd:
+ hosts:
+ - name: {{yaml.genesis.name}}
+ conf:
+ storage:
+ failure_domain_name: "{{yaml.genesis.name}}_rack"
+{% for server in yaml.masters %}
+ - name: {{server.name}}
+ conf:
+ storage:
+ failure_domain_name: "{{server.name}}_rack"
+{% endfor %}
+...
--- /dev/null
+---
+# The purpose of this file is to define site-specific parameters to the
+# UAM-lite portion of the divingbell chart:
+# 1. User accounts to create on bare metal
+# 2. SSH public key for operating system access to the bare metal
+# 3. Passwords for operating system access via iDrac/iLo console. SSH password-
+# based auth is disabled.
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp-divingbell
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: ucp-divingbell-global
+ actions:
+ - method: merge
+ path: .
+ labels:
+ name: ucp-divingbell-site
+ storagePolicy: cleartext
+ substitutions:
+ - dest:
+ path: .values.conf.uamlite.users[0].user_sshkeys[0]
+ src:
+ schema: deckhand/PublicKey/v1
+ name: airship_ssh_public_key
+ path: .
+ - dest:
+ path: .values.conf.uamlite.users[0].user_crypt_passwd
+ src:
+ schema: deckhand/Passphrase/v1
+ name: ubuntu_crypt_password
+ path: .
+ - dest:
+ path: .values.conf.uamlite.users[0].user_sshkeys[1]
+ src:
+ schema: deckhand/PublicKey/v1
+ name: localadmin_ssh_public_key
+ path: .
+data:
+ values:
+ conf:
+ uamlite:
+ users:
+ - user_name: localadmin
+ user_sudo: true
+ user_sshkeys: []
+ mounts:
+ mnt1:
+ mnt_tgt: /dev/hugepages_1G
+ device: none
+ type: hugetlbfs
+ options: 'mode=775,pagesize=1G'
+...
--- /dev/null
+---
+# The purpose of this file is to provide site-specific parameters for the ucp-
+# promenade chart.
+schema: armada/Chart/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp-promenade
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: ucp-promenade-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ values:
+ pod:
+ env:
+ promenade_api:
+ - name: no_proxy
+ value: localhost,127.0.0.1,192.168.0.0/16,172.0.0.0/8,10.0.0.0/8
+ - name: NO_PROXY
+ value: localhost,127.0.0.1,192.168.0.0/16,172.0.0.0/8,10.0.0.0/8
+ # NEWSITE-CHANGEME: If your site uses an http proxy, enter it here.
+ # Otherwise comment out these lines.
+ # - name: http_proxy
+ # value: 'http://proxy.example.com:8080'
+ # NEWSITE-CHANGEME: If your site uses an https proxy, enter it here.
+ # Otherwise comment out these lines.
+ # - name: https_proxy
+ # value: 'http://proxy.example.com:8080'
+ # NEWSITE-CHANGEME: If your site uses an http/https proxy, enter the
+ # IPs / domain names which the proxy should not be used for (i.e. the
+ # cluster domain and kubernetes service_cidr defined in common-addresses)
+ # Otherwise comment out these lines.
+ # - name: no_proxy
+ # value: '10.96.0.1,.cluster.local'
+ # NEWSITE-CHANGEME: If your site uses an http proxy, enter it here.
+ # Otherwise comment out these lines.
+ # - name: HTTP_PROXY
+ # value: 'http://proxy.example.com:8080'
+ # NEWSITE-CHANGEME: If your site uses an https proxy, enter it here.
+ # Otherwise comment out these lines.
+ # - name: HTTPS_PROXY
+ # value: 'http://proxy.example.com:8080'
+ # NEWSITE-CHANGEME: If your site uses an http/https proxy, enter the
+ # IPs / domain names which the proxy should not be used for (i.e. the
+ # cluster domain and kubernetes service_cidr defined in common-addresses)
+ # Otherwise comment out these lines.
+ # - name: NO_PROXY
+ # value: '10.96.0.1,.cluster.local'
+...
--- /dev/null
+---
+# The purpose of this file is to define site-specific common software config
+# parameters.
+schema: pegleg/CommonSoftwareConfig/v1
+metadata:
+ schema: metadata/Document/v1
+ name: common-software-config
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ osh:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+...
--- /dev/null
+---
+# The purpose of this file is to define the site's endpoint catalog. This should
+# not need to be modified for a new site.
+# #GLOBAL-CANDIDATE#
+schema: pegleg/EndpointCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp_endpoints
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .dns.ingress_domain
+ dest:
+ - path: .ucp.identity.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .ucp.identity.host_fqdn_override.admin.host
+ pattern: DOMAIN
+ - path: .ucp.shipyard.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .ucp.physicalprovisioner.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .ucp.maas_region.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .ceph.object_store.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .ceph.ceph_object_store.host_fqdn_override.public.host
+ pattern: DOMAIN
+# - src:
+# schema: deckhand/Certificate/v1
+# name: ingress-crt
+# path: .
+# dest:
+# - path: .ucp.identity.host_fqdn_override.public.tls.crt
+# - path: .ucp.shipyard.host_fqdn_override.public.tls.crt
+# - path: .ucp.physicalprovisioner.host_fqdn_override.public.tls.crt
+# - path: .ceph.object_store.host_fqdn_override.public.tls.crt
+# - path: .ceph.ceph_object_store.host_fqdn_override.public.tls.crt
+# - src:
+# schema: deckhand/CertificateAuthority/v1
+# name: ingress-ca
+# path: .
+# dest:
+# - path: .ucp.identity.host_fqdn_override.public.tls.ca
+# - path: .ucp.shipyard.host_fqdn_override.public.tls.ca
+# - path: .ucp.physicalprovisioner.host_fqdn_override.public.tls.ca
+# - path: .ceph.object_store.host_fqdn_override.public.tls.ca
+# - path: .ceph.ceph_object_store.host_fqdn_override.public.tls.ca
+# - src:
+# schema: deckhand/CertificateKey/v1
+# name: ingress-key
+# path: .
+# dest:
+# - path: .ucp.identity.host_fqdn_override.public.tls.key
+# - path: .ucp.shipyard.host_fqdn_override.public.tls.key
+# - path: .ucp.physicalprovisioner.host_fqdn_override.public.tls.key
+# - path: .ceph.object_store.host_fqdn_override.public.tls.key
+# - path: .ceph.ceph_object_store.host_fqdn_override.public.tls.key
+data:
+ ucp:
+ identity:
+ namespace: ucp
+ name: keystone
+ hosts:
+ default: keystone
+ internal: keystone-api
+ host_fqdn_override:
+ default: null
+ public:
+ host: iam-sw.DOMAIN
+ admin:
+ host: iam-sw.DOMAIN
+ path:
+ default: /v3
+ scheme:
+ default: "http"
+ internal: "http"
+ public: "http"
+ #internal: "https"
+ port:
+ api:
+ default: 80
+ internal: 5000
+ armada:
+ name: armada
+ hosts:
+ default: armada-api
+ public: armada
+ port:
+ api:
+ default: 8000
+ path:
+ default: /api/v1.0
+ scheme:
+ default: "http"
+ host_fqdn_override:
+ default: null
+ deckhand:
+ name: deckhand
+ hosts:
+ default: deckhand-int
+ public: deckhand-api
+ port:
+ api:
+ default: 9000
+ path:
+ default: /api/v1.0
+ scheme:
+ default: "http"
+ host_fqdn_override:
+ default: null
+ postgresql:
+ name: postgresql
+ hosts:
+ default: postgresql
+ path: /DB_NAME
+ scheme: postgresql+psycopg2
+ port:
+ postgresql:
+ default: 5432
+ host_fqdn_override:
+ default: null
+ postgresql_airflow_celery:
+ name: postgresql_airflow_celery_db
+ hosts:
+ default: postgresql
+ path: /DB_NAME
+ scheme: db+postgresql
+ port:
+ postgresql:
+ default: 5432
+ host_fqdn_override:
+ default: null
+ oslo_db:
+ hosts:
+ default: mariadb
+ discovery: mariadb-discovery
+ host_fqdn_override:
+ default: null
+ path: /DB_NAME
+ scheme: mysql+pymysql
+ port:
+ mysql:
+ default: 3306
+ wsrep:
+ default: 4567
+ key_manager:
+ name: barbican
+ hosts:
+ default: barbican-api
+ public: barbican
+ host_fqdn_override:
+ default: null
+ path:
+ default: /v1
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 9311
+ public: 80
+ airflow_oslo_messaging:
+ namespace: null
+ hosts:
+ default: rabbitmq
+ host_fqdn_override:
+ default: null
+ path: /airflow
+ scheme: amqp
+ port:
+ amqp:
+ default: 5672
+ http:
+ default: 15672
+ oslo_messaging:
+ namespace: null
+ statefulset:
+ name: airship-ucp-rabbitmq-rabbitmq
+ hosts:
+ default: rabbitmq
+ host_fqdn_override:
+ default: null
+ path: /keystone
+ scheme: rabbit
+ port:
+ amqp:
+ default: 5672
+ oslo_cache:
+ hosts:
+ default: memcached
+ host_fqdn_override:
+ default: null
+ port:
+ memcache:
+ default: 11211
+ physicalprovisioner:
+ name: drydock
+ hosts:
+ default: drydock-api
+ port:
+ api:
+ default: 9000
+ nodeport: 31900
+ public: 80
+ path:
+ default: /api/v1.0
+ scheme:
+ default: "http"
+ public: "http"
+ host_fqdn_override:
+ default: null
+ public:
+ host: drydock-sw.DOMAIN
+ maas_region:
+ name: maas-region
+ hosts:
+ default: maas-region
+ public: maas
+ path:
+ default: /MAAS
+ scheme:
+ default: "http"
+ port:
+ region_api:
+ default: 80
+ nodeport: 31900
+ podport: 80
+ public: 80
+ region_proxy:
+ default: 8000
+ host_fqdn_override:
+ default: null
+ public:
+ host: maas-sw.DOMAIN
+ maas_ingress:
+ hosts:
+ default: maas-ingress
+ error_pages: maas-ingress-error
+ host_fqdn_override:
+ public: null
+ port:
+ http:
+ default: 80
+ https:
+ default: 443
+ ingress_default_server:
+ default: 8383
+ error_pages:
+ default: 8080
+ podport: 8080
+ healthz:
+ podport: 10259
+ status:
+ podport: 18089
+ kubernetesprovisioner:
+ name: promenade
+ hosts:
+ default: promenade-api
+ port:
+ api:
+ default: 80
+ path:
+ default: /api/v1.0
+ scheme:
+ default: "http"
+ host_fqdn_override:
+ default: null
+ shipyard:
+ name: shipyard
+ hosts:
+ default: shipyard-int
+ public: shipyard-api
+ port:
+ api:
+ default: 9000
+ public: 80
+ path:
+ default: /api/v1.0
+ scheme:
+ default: "http"
+ public: "http"
+ host_fqdn_override:
+ default: null
+ public:
+ host: shipyard-sw.DOMAIN
+ prometheus_openstack_exporter:
+ namespace: ucp
+ hosts:
+ default: openstack-metrics
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ exporter:
+ default: 9103
+ ceph:
+ object_store:
+ name: swift
+ namespace: ceph
+ hosts:
+ default: ceph-rgw
+ public: radosgw
+ host_fqdn_override:
+ default: null
+ public:
+ host: object-store-sw.DOMAIN
+ path:
+ default: /swift/v1
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8088
+ public: 80
+ ceph_object_store:
+ name: radosgw
+ namespace: ceph
+ hosts:
+ default: ceph-rgw
+ public: radosgw
+ host_fqdn_override:
+ default: null
+ public:
+ host: object-store-sw.DOMAIN
+ path:
+ default: /auth/v1.0
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8088
+ public: 80
+ ceph_mon:
+ namespace: ceph
+ hosts:
+ default: ceph-mon
+ discovery: ceph-mon-discovery
+ host_fqdn_override:
+ default: null
+ port:
+ mon:
+ default: 6789
+ ceph_mgr:
+ namespace: ceph
+ hosts:
+ default: ceph-mgr
+ host_fqdn_override:
+ default: null
+ port:
+ mgr:
+ default: 7000
+ scheme:
+ default: "http"
+ tenant_ceph_mon:
+ namespace: tenant-ceph
+ hosts:
+ default: ceph-mon
+ discovery: ceph-mon-discovery
+ host_fqdn_override:
+ default: null
+ port:
+ mon:
+ default: 6790
+ tenant_ceph_mgr:
+ namespace: tenant-ceph
+ hosts:
+ default: ceph-mgr
+ host_fqdn_override:
+ default: null
+ port:
+ mgr:
+ default: 7001
+ metrics:
+ default: 9284
+      scheme:
+        default: "http"
+...
+---
+schema: pegleg/EndpointCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: osh_endpoints
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .dns.ingress_domain
+ dest:
+ - path: .osh.object_store.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.ceph_object_store.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.image.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.cloudformation.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.orchestration.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.compute.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.compute_novnc_proxy.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.placement.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.network.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.identity.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.identity.host_fqdn_override.admin.host
+ pattern: DOMAIN
+ - path: .osh.dashboard.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.volume.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.volumev2.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh.volumev3.host_fqdn_override.public.host
+ pattern: DOMAIN
+# - src:
+# schema: deckhand/Certificate/v1
+# name: ingress-crt
+# path: .
+# dest:
+# - path: .osh.object_store.host_fqdn_override.public.tls.crt
+# - path: .osh.ceph_object_store.host_fqdn_override.public.tls.crt
+# - path: .osh.identity.host_fqdn_override.public.tls.crt
+# - path: .osh.orchestration.host_fqdn_override.public.tls.crt
+# - path: .osh.cloudformation.host_fqdn_override.public.tls.crt
+# - path: .osh.dashboard.host_fqdn_override.public.tls.crt
+# - path: .osh.image.host_fqdn_override.public.tls.crt
+# - path: .osh.volume.host_fqdn_override.public.tls.crt
+# - path: .osh.volumev2.host_fqdn_override.public.tls.crt
+# - path: .osh.volumev3.host_fqdn_override.public.tls.crt
+# - path: .osh.compute.host_fqdn_override.public.tls.crt
+# - path: .osh.compute_novnc_proxy.host_fqdn_override.public.tls.crt
+# - path: .osh.placement.host_fqdn_override.public.tls.crt
+# - path: .osh.network.host_fqdn_override.public.tls.crt
+# - src:
+# schema: deckhand/CertificateAuthority/v1
+# name: ingress-ca
+# path: .
+# dest:
+# - path: .osh.object_store.host_fqdn_override.public.tls.ca
+# - path: .osh.ceph_object_store.host_fqdn_override.public.tls.ca
+# - path: .osh.identity.host_fqdn_override.public.tls.ca
+# - path: .osh.orchestration.host_fqdn_override.public.tls.ca
+# - path: .osh.cloudformation.host_fqdn_override.public.tls.ca
+# - path: .osh.dashboard.host_fqdn_override.public.tls.ca
+# - path: .osh.image.host_fqdn_override.public.tls.ca
+# - path: .osh.volume.host_fqdn_override.public.tls.ca
+# - path: .osh.volumev2.host_fqdn_override.public.tls.ca
+# - path: .osh.volumev3.host_fqdn_override.public.tls.ca
+# - path: .osh.compute.host_fqdn_override.public.tls.ca
+# - path: .osh.compute_novnc_proxy.host_fqdn_override.public.tls.ca
+# - path: .osh.placement.host_fqdn_override.public.tls.ca
+# - path: .osh.network.host_fqdn_override.public.tls.ca
+# - src:
+# schema: deckhand/CertificateKey/v1
+# name: ingress-key
+# path: .
+# dest:
+# - path: .osh.object_store.host_fqdn_override.public.tls.key
+# - path: .osh.ceph_object_store.host_fqdn_override.public.tls.key
+# - path: .osh.identity.host_fqdn_override.public.tls.key
+# - path: .osh.orchestration.host_fqdn_override.public.tls.key
+# - path: .osh.cloudformation.host_fqdn_override.public.tls.key
+# - path: .osh.dashboard.host_fqdn_override.public.tls.key
+# - path: .osh.image.host_fqdn_override.public.tls.key
+# - path: .osh.volume.host_fqdn_override.public.tls.key
+# - path: .osh.volumev2.host_fqdn_override.public.tls.key
+# - path: .osh.volumev3.host_fqdn_override.public.tls.key
+# - path: .osh.compute.host_fqdn_override.public.tls.key
+# - path: .osh.compute_novnc_proxy.host_fqdn_override.public.tls.key
+# - path: .osh.placement.host_fqdn_override.public.tls.key
+# - path: .osh.network.host_fqdn_override.public.tls.key
+data:
+ osh:
+ object_store:
+ name: swift
+ namespace: openstack
+ hosts:
+ default: ceph-rgw
+ public: radosgw
+ host_fqdn_override:
+ default: null
+ public:
+ host: object-store-sw.DOMAIN
+ path:
+ default: /swift/v1/KEY_$(tenant_id)s
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8088
+ public: 80
+ ceph_object_store:
+ name: radosgw
+ namespace: openstack
+ hosts:
+ default: ceph-rgw
+ public: radosgw
+ host_fqdn_override:
+ default: null
+ public:
+ host: object-store-sw.DOMAIN
+ path:
+ default: /auth/v1.0
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8088
+ public: 80
+ oslo_db:
+ hosts:
+ default: mariadb
+ discovery: mariadb-discovery
+ host_fqdn_override:
+ default: null
+ path: /DB_NAME
+ scheme: mysql+pymysql
+ port:
+ mysql:
+ default: 3306
+ wsrep:
+ default: 4567
+ prometheus_mysql_exporter:
+ namespace: openstack
+ hosts:
+ default: mysql-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: /metrics
+      scheme:
+        default: "http"
+ port:
+ metrics:
+ default: 9104
+ oslo_messaging:
+ statefulset:
+ name: airship-openstack-rabbitmq-rabbitmq
+ namespace: openstack
+ hosts:
+ default: openstack-rabbitmq
+ host_fqdn_override:
+ default: null
+ path: /VHOST_NAME
+ scheme: rabbit
+ port:
+ amqp:
+ default: 5672
+ http:
+ default: 15672
+ openstack_rabbitmq_exporter:
+ namespace: openstack
+ hosts:
+ default: openstack-rabbitmq-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: /metrics
+ scheme:
+ default: "http"
+ port:
+ metrics:
+ default: 9095
+ oslo_cache:
+ namespace: openstack
+ hosts:
+ default: memcached
+ host_fqdn_override:
+ default: null
+ port:
+ memcache:
+ default: 11211
+ identity:
+ namespace: openstack
+ name: keystone
+ hosts:
+ default: keystone
+ internal: keystone-api
+ host_fqdn_override:
+ default: null
+ public:
+ host: identity-sw.DOMAIN
+ admin:
+ host: identity-sw.DOMAIN
+ path:
+ default: /v3
+ scheme:
+ default: "http"
+ internal: "http"
+ public: "http"
+ #internal: "https"
+ port:
+ api:
+ default: 80
+ internal: 5000
+ image:
+ name: glance
+ hosts:
+ default: glance-api
+ public: glance
+ host_fqdn_override:
+ default: null
+ public:
+ host: image-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 9292
+ public: 80
+ image_registry:
+ name: glance-registry
+ hosts:
+ default: glance-registry
+ public: glance-reg
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 9191
+ public: 80
+ volume:
+ name: cinder
+ hosts:
+ default: cinder-api
+ public: cinder
+ host_fqdn_override:
+ default: null
+ public:
+ host: volume-sw.DOMAIN
+ path:
+ default: "/v1/%(tenant_id)s"
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8776
+ public: 80
+ volumev2:
+ name: cinderv2
+ hosts:
+ default: cinder-api
+ public: cinder
+ host_fqdn_override:
+ default: null
+ public:
+ host: volume-sw.DOMAIN
+ path:
+ default: "/v2/%(tenant_id)s"
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8776
+ public: 80
+ volumev3:
+ name: cinderv3
+ hosts:
+ default: cinder-api
+ public: cinder
+ host_fqdn_override:
+ default: null
+ public:
+ host: volume-sw.DOMAIN
+ path:
+ default: "/v3/%(tenant_id)s"
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8776
+ public: 80
+ orchestration:
+ name: heat
+ hosts:
+ default: heat-api
+ public: heat
+ host_fqdn_override:
+ default: null
+ public:
+ host: orchestration-sw.DOMAIN
+ path:
+ default: "/v1/%(project_id)s"
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8004
+ public: 80
+ cloudformation:
+ name: heat-cfn
+ hosts:
+ default: heat-cfn
+ public: cloudformation
+ host_fqdn_override:
+ default: null
+ public:
+ host: cloudformation-sw.DOMAIN
+ path:
+ default: /v1
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8000
+ public: 80
+ cloudwatch:
+ name: heat-cloudwatch
+ hosts:
+ default: heat-cloudwatch
+ public: cloudwatch
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ type: null
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 8003
+ public: 80
+ network:
+ name: neutron
+ hosts:
+ default: neutron-server
+ public: neutron
+ host_fqdn_override:
+ default: null
+ public:
+ host: network-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 9696
+ public: 80
+ compute:
+ name: nova
+ hosts:
+ default: nova-api
+ public: nova
+ host_fqdn_override:
+ default: null
+ public:
+ host: compute-sw.DOMAIN
+ path:
+ default: "/v2/%(tenant_id)s"
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8774
+ public: 80
+ novncproxy:
+ default: 80
+ compute_metadata:
+ name: nova
+ hosts:
+ default: nova-metadata
+ public: metadata
+ host_fqdn_override:
+ default: null
+ path:
+ default: /
+ scheme:
+ default: "http"
+ port:
+ metadata:
+ default: 8775
+ public: 80
+ compute_novnc_proxy:
+ name: nova
+ hosts:
+ default: nova-novncproxy
+ public: novncproxy
+ host_fqdn_override:
+ default: null
+ public:
+ host: nova-novncproxy-sw.DOMAIN
+ path:
+ default: /vnc_auto.html
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ novnc_proxy:
+ default: 6080
+ public: 80
+ compute_spice_proxy:
+ name: nova
+ hosts:
+ default: nova-spiceproxy
+ host_fqdn_override:
+ default: null
+ path:
+ default: /spice_auto.html
+ scheme:
+ default: "http"
+ port:
+ spice_proxy:
+ default: 6082
+ placement:
+ name: placement
+ hosts:
+ default: placement-api
+ public: placement
+ host_fqdn_override:
+ default: null
+ public:
+ host: placement-sw.DOMAIN
+ path:
+ default: /
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ api:
+ default: 8778
+ public: 80
+ dashboard:
+ name: horizon
+ hosts:
+ default: horizon-int
+ public: horizon
+ host_fqdn_override:
+ default: null
+ public:
+ host: dashboard-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ web:
+ default: 80
+ public: 80
+...
+---
+schema: pegleg/EndpointCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: osh_infra_endpoints
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .dns.ingress_domain
+ dest:
+ - path: .osh_infra.kibana.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh_infra.grafana.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - path: .osh_infra.nagios.host_fqdn_override.public.host
+ pattern: DOMAIN
+# - src:
+# schema: deckhand/Certificate/v1
+# name: ingress-crt
+# path: .
+# dest:
+# - path: .osh_infra.kibana.host_fqdn_override.public.tls.crt
+# - path: .osh_infra.grafana.host_fqdn_override.public.tls.crt
+# - path: .osh_infra.nagios.host_fqdn_override.public.tls.crt
+# - src:
+# schema: deckhand/CertificateAuthority/v1
+# name: ingress-ca
+# path: .
+# dest:
+# - path: .osh_infra.kibana.host_fqdn_override.public.tls.ca
+# - path: .osh_infra.grafana.host_fqdn_override.public.tls.ca
+# - path: .osh_infra.nagios.host_fqdn_override.public.tls.ca
+# - src:
+# schema: deckhand/CertificateKey/v1
+# name: ingress-key
+# path: .
+# dest:
+# - path: .osh_infra.kibana.host_fqdn_override.public.tls.key
+# - path: .osh_infra.grafana.host_fqdn_override.public.tls.key
+# - path: .osh_infra.nagios.host_fqdn_override.public.tls.key
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .ldap.base_url
+ dest:
+ path: .osh_infra.ldap.host_fqdn_override.public.host
+ pattern: DOMAIN
+ - src:
+ schema: pegleg/CommonAddresses/v1
+ name: common-addresses
+ path: .ldap.auth_path
+ dest:
+ path: .osh_infra.ldap.path.default
+ pattern: AUTH_PATH
+data:
+ osh_infra:
+ ceph_object_store:
+ name: radosgw
+ namespace: osh-infra
+ hosts:
+ default: ceph-rgw
+ public: radosgw
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 8088
+ public: 80
+ elasticsearch:
+ name: elasticsearch
+ namespace: osh-infra
+ hosts:
+ data: elasticsearch-data
+ default: elasticsearch-logging
+ discovery: elasticsearch-discovery
+ public: elasticsearch
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+      scheme:
+        default: "http"
+      # NOTE(review): this endpoint defines no "port" section, unlike every
+      # other entry in this catalogue — confirm nothing resolves
+      # .osh_infra.elasticsearch.port (upstream catalogues define
+      # client/http/discovery ports here).
+ prometheus_elasticsearch_exporter:
+ namespace: null
+ hosts:
+ default: elasticsearch-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: /metrics
+ scheme:
+ default: "http"
+ port:
+ metrics:
+ default: 9108
+ fluentd:
+ namespace: osh-infra
+ name: fluentd
+ hosts:
+ default: fluentd-logging
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ service:
+ default: 24224
+ metrics:
+ default: 24220
+ prometheus_fluentd_exporter:
+ namespace: osh-infra
+ hosts:
+ default: fluentd-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: /metrics
+ scheme:
+ default: "http"
+ port:
+ metrics:
+ default: 9309
+ oslo_db:
+ namespace: osh-infra
+ hosts:
+ default: mariadb
+ host_fqdn_override:
+ default: null
+ path: /DB_NAME
+ scheme: mysql+pymysql
+ port:
+ mysql:
+ default: 3306
+ prometheus_mysql_exporter:
+ namespace: osh-infra
+ hosts:
+ default: mysql-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: /metrics
+      scheme:
+        default: "http"
+ port:
+ metrics:
+ default: 9104
+ grafana:
+ name: grafana
+ namespace: osh-infra
+ hosts:
+ default: grafana-dashboard
+ public: grafana
+ host_fqdn_override:
+ default: null
+ public:
+ host: grafana-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ grafana:
+ default: 3000
+ public: 80
+ monitoring:
+ name: prometheus
+ namespace: osh-infra
+ hosts:
+ default: prom-metrics
+ public: prometheus
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 9090
+ http:
+ default: 80
+ kibana:
+ name: kibana
+ namespace: osh-infra
+ hosts:
+ default: kibana-dash
+ public: kibana
+ host_fqdn_override:
+ default: null
+ public:
+ host: kibana-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ kibana:
+ default: 5601
+ public: 80
+ alerts:
+ name: alertmanager
+ namespace: osh-infra
+ hosts:
+ default: alerts-engine
+ public: alertmanager
+ discovery: alertmanager-discovery
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ api:
+ default: 9093
+ public: 80
+ mesh:
+ default: 6783
+ kube_state_metrics:
+ namespace: kube-system
+ hosts:
+ default: kube-state-metrics
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ http:
+ default: 8080
+ kube_scheduler:
+ scheme:
+ default: "http"
+ path:
+ default: /metrics
+ kube_controller_manager:
+ scheme:
+ default: "http"
+ path:
+ default: /metrics
+ node_metrics:
+ namespace: kube-system
+ hosts:
+ default: node-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ metrics:
+ default: 9100
+ prometheus_port:
+ default: 9100
+ process_exporter_metrics:
+ namespace: kube-system
+ hosts:
+ default: process-exporter
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ metrics:
+ default: 9256
+ prometheus_openstack_exporter:
+ namespace: openstack
+ hosts:
+ default: openstack-metrics
+ host_fqdn_override:
+ default: null
+ path:
+ default: null
+ scheme:
+ default: "http"
+ port:
+ exporter:
+ default: 9103
+ nagios:
+ name: nagios
+ namespace: osh-infra
+ hosts:
+ default: nagios-metrics
+ public: nagios
+ host_fqdn_override:
+ default: null
+ public:
+ host: nagios-sw.DOMAIN
+ path:
+ default: null
+ scheme:
+ default: "http"
+ public: "http"
+ port:
+ http:
+ default: 80
+ public: 80
+ ldap:
+ hosts:
+ default: ldap
+ host_fqdn_override:
+ default: null
+ public:
+ host: DOMAIN
+ path:
+ default: /AUTH_PATH
+ scheme:
+ default: "ldap"
+ port:
+ ldap:
+ default: 389
+...
--- /dev/null
+---
+# The purpose of this file is to define the account catalog for the site. This
+# mostly contains service usernames, but also contain some information which
+# should be changed like the region (site) name.
+schema: pegleg/AccountCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: ucp_service_accounts
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+data:
+ ucp:
+ postgres:
+ admin:
+ username: postgres
+ replica:
+ username: standby
+ exporter:
+ username: psql_exporter
+ oslo_db:
+ admin:
+ username: root
+ oslo_messaging:
+ admin:
+ username: rabbitmq
+ keystone:
+ admin:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ username: admin
+ project_name: admin
+ user_domain_name: default
+ project_domain_name: default
+ oslo_messaging:
+ admin:
+ username: rabbitmq
+ keystone:
+ username: keystone
+ oslo_db:
+ username: keystone
+ database: keystone
+ promenade:
+ keystone:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ project_name: service
+ project_domain_name: default
+ user_domain_name: default
+ username: promenade
+ drydock:
+ keystone:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ project_name: service
+ project_domain_name: default
+ user_domain_name: default
+ username: drydock
+ postgres:
+ username: drydock
+ database: drydock
+ shipyard:
+ keystone:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ project_name: service
+ project_domain_name: default
+ user_domain_name: default
+ username: shipyard
+ postgres:
+ username: shipyard
+ database: shipyard
+ airflow:
+ postgres:
+ username: airflow
+ database: airflow
+ oslo_messaging:
+ admin:
+ username: rabbitmq
+ user:
+ username: airflow
+ maas:
+ admin:
+ username: admin
+ email: none@none
+ postgres:
+ username: maas
+ database: maasdb
+ barbican:
+ keystone:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ project_name: service
+ project_domain_name: default
+ user_domain_name: default
+ username: barbican
+ oslo_db:
+ username: barbican
+ database: barbican
+ oslo_messaging:
+ admin:
+ username: rabbitmq
+ keystone:
+ username: keystone
+ armada:
+ keystone:
+ project_domain_name: default
+ user_domain_name: default
+ project_name: service
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ username: armada
+ deckhand:
+ keystone:
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ role: admin
+ project_name: service
+ project_domain_name: default
+ user_domain_name: default
+ username: deckhand
+ postgres:
+ username: deckhand
+ database: deckhand
+ prometheus_openstack_exporter:
+ user:
+ region_name: RegionOne
+ role: admin
+ username: prometheus-openstack-exporter
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ ceph:
+ swift:
+ keystone:
+ role: admin
+ # NEWSITE-CHANGEME: Replace with the site name
+ region_name: RegionOne
+ username: swift
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+...
+---
+schema: pegleg/AccountCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: osh_service_accounts
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.keystone.admin.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.cinder.cinder.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.glance.glance.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.heat.heat.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.heat.heat_trustee.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.heat.heat_stack_user.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.swift.keystone.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.neutron.neutron.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.nova.nova.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.nova.placement.region_name
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh.barbican.barbican.region_name
+data:
+ osh:
+ keystone:
+ admin:
+ username: admin
+ project_name: admin
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: keystone
+ database: keystone
+ oslo_messaging:
+ keystone:
+ username: keystone-rabbitmq-user
+ ldap:
+ # NEWSITE-CHANGEME: Replace with the site's LDAP account used to
+ # authenticate to the active directory backend to validate keystone
+ # users.
+ username: "test@ldap.example.com"
+ cinder:
+ cinder:
+ role: admin
+ username: cinder
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: cinder
+ database: cinder
+ oslo_messaging:
+ cinder:
+ username: cinder-rabbitmq-user
+ glance:
+ glance:
+ role: admin
+ username: glance
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: glance
+ database: glance
+ oslo_messaging:
+ glance:
+ username: glance-rabbitmq-user
+ ceph_object_store:
+ username: glance
+ heat:
+ heat:
+ role: admin
+ username: heat
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ heat_trustee:
+ role: admin
+ username: heat-trust
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ heat_stack_user:
+ role: admin
+ username: heat-domain
+ domain_name: heat
+ oslo_db:
+ username: heat
+ database: heat
+ oslo_messaging:
+ heat:
+ username: heat-rabbitmq-user
+ swift:
+ keystone:
+ role: admin
+ username: swift
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ admin:
+ username: root
+ prometheus_mysql_exporter:
+ user:
+ username: osh-oslodb-exporter
+ neutron:
+ neutron:
+ role: admin
+ username: neutron
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: neutron
+ database: neutron
+ oslo_messaging:
+ neutron:
+ username: neutron-rabbitmq-user
+ nova:
+ nova:
+ role: admin
+ username: nova
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ placement:
+ role: admin
+ username: placement
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: nova
+ database: nova
+ oslo_db_api:
+ username: nova
+ database: nova_api
+ oslo_db_cell0:
+ username: nova
+ database: "nova_cell0"
+ oslo_messaging:
+ nova:
+ username: nova-rabbitmq-user
+ horizon:
+ oslo_db:
+ username: horizon
+ database: horizon
+ barbican:
+ barbican:
+ role: admin
+ username: barbican
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ oslo_db:
+ username: barbican
+ database: barbican
+ oslo_messaging:
+ barbican:
+ username: barbican-rabbitmq-user
+ oslo_messaging:
+ admin:
+ username: admin
+ tempest:
+ tempest:
+ role: admin
+ username: tempest
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+...
+---
+schema: pegleg/AccountCatalogue/v1
+metadata:
+ schema: metadata/Document/v1
+ name: osh_infra_service_accounts
+ layeringDefinition:
+ abstract: false
+ layer: site
+ storagePolicy: cleartext
+ substitutions:
+ - src:
+ schema: pegleg/CommonSoftwareConfig/v1
+ name: common-software-config
+ path: .osh.region_name
+ dest:
+ path: .osh_infra.prometheus_openstack_exporter.user.region_name
+data:
+ osh_infra:
+ ceph_object_store:
+ admin:
+ username: s3_admin
+ elasticsearch:
+ username: elasticsearch
+ grafana:
+ admin:
+ username: grafana
+ oslo_db:
+ username: grafana
+ database: grafana
+ oslo_db_session:
+ username: grafana_session
+ database: grafana_session
+ elasticsearch:
+ admin:
+ username: elasticsearch
+ oslo_db:
+ admin:
+ username: root
+ prometheus_mysql_exporter:
+ user:
+ username: osh-infra-oslodb-exporter
+ prometheus_openstack_exporter:
+ user:
+ role: admin
+ username: prometheus-openstack-exporter
+ project_name: service
+ user_domain_name: default
+ project_domain_name: default
+ nagios:
+ admin:
+ username: nagios
+ prometheus:
+ admin:
+ username: prometheus
+ ldap:
+ admin:
+ # NEWSITE-CHANGEME: Replace with the site's LDAP account used to
+ # authenticate to the active directory backend to validate keystone
+ # users.
+ bind: "test@ldap.example.com"
+...
--- /dev/null
+---
+schema: pegleg/SoftwareVersions/v1
+metadata:
+ schema: metadata/Document/v1
+ replacement: true
+ name: software-versions
+ layeringDefinition:
+ abstract: false
+ layer: site
+ parentSelector:
+ name: software-versions-global
+ actions:
+ - method: merge
+ path: .
+ storagePolicy: cleartext
+data:
+ charts:
+ osh:
+ neutron:
+ location: https://opendev.org/openstack/openstack-helm
+ reference: 05bff26162cc05286ba563d6e8cec82a36031b7d
+ subpath: neutron
+ type: git
+ neutron-htk:
+ location: https://opendev.org/openstack/openstack-helm-infra
+ reference: d0b32ed88ad652d9c2226466a13bac8b28038399
+ subpath: helm-toolkit
+ type: git
+ nova:
+ location: https://opendev.org/openstack/openstack-helm
+ reference: d2abe39d498f48c4721e26aca19e81189bc8891b
+ subpath: nova
+ type: git
+ nova-htk:
+ location: https://opendev.org/openstack/openstack-helm-infra
+ reference: d0b32ed88ad652d9c2226466a13bac8b28038399
+ subpath: helm-toolkit
+ type: git
+ openvswitch:
+ location: https://opendev.org/openstack/openstack-helm-infra
+ reference: refs/changes/18/688818/2
+ subpath: openvswitch
+ type: git
+ openvswitch-htk:
+ location: https://opendev.org/openstack/openstack-helm-infra
+ reference: d0b32ed88ad652d9c2226466a13bac8b28038399
+ subpath: helm-toolkit
+ type: git
+ images_refs:
+ images:
+ dep_check: &dep_check quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
+ heat: &heat docker.io/openstackhelm/heat:rocky-ubuntu_bionic
+ neutron: &neutron docker.io/openstackhelm/neutron:rocky-ubuntu_bionic
+ horizon: &horizon docker.io/openstackhelm/horizon:rocky-ubuntu_bionic
+ cinder: &cinder docker.io/openstackhelm/cinder:rocky-ubuntu_bionic
+ keystone: &keystone docker.io/openstackhelm/keystone:rocky-ubuntu_bionic
+ nova: &nova docker.io/openstackhelm/nova:rocky-ubuntu_bionic
+ glance: &glance docker.io/openstackhelm/glance:rocky-ubuntu_bionic
+ openvswitch: &openvswitch docker.io/openstackhelm/openvswitch:latest-ubuntu_bionic-dpdk
+ os_barbican: &os_barbican docker.io/openstackhelm/barbican:rocky-ubuntu_bionic
+ libvirt: &libvirt docker.io/openstackhelm/libvirt:latest-ubuntu_bionic
+ images:
+    osh:
+      # NOTE: openvswitch images are declared once below (after nova) via the
+      # images_refs anchors; a second, byte-identical literal copy that used to
+      # sit here was dropped — duplicate mapping keys are invalid YAML and
+      # resolve parser-dependently (silent last-wins).
+ neutron:
+ bootstrap: *heat
+ db_init: *heat
+ neutron_db_sync: *neutron
+ db_drop: *heat
+ ks_user: *heat
+ ks_service: *heat
+ ks_endpoints: *heat
+ neutron_server: *neutron
+ neutron_dhcp: *neutron
+ neutron_metadata: *neutron
+ neutron_l3: *neutron
+ neutron_l2gw: *neutron
+ neutron_openvswitch_agent: *neutron
+ neutron_linuxbridge_agent: *neutron
+ neutron_bagpipe_bgp: *neutron
+ nova:
+ bootstrap: *heat
+ db_drop: *heat
+ db_init: *heat
+ ks_user: *heat
+ ks_service: *heat
+ ks_endpoints: *heat
+ nova_api: *nova
+ nova_cell_setup: *nova
+ nova_cell_setup_init: *heat
+ nova_compute: *nova
+ nova_compute_ssh: *nova
+ nova_conductor: *nova
+ nova_consoleauth: *nova
+ nova_db_sync: *nova
+ nova_novncproxy: *nova
+ nova_novncproxy_assets: *nova
+ nova_placement: *nova
+ nova_scheduler: *nova
+ nova_spiceproxy: *nova
+ nova_spiceproxy_assets: *nova
+ nova_service_cleaner: "docker.io/openstackhelm/ceph-config-helper:latest-ubuntu_xenial"
+ openvswitch:
+ openvswitch_db_server: *openvswitch
+ openvswitch_vswitchd: *openvswitch
+...