2 # yamllint disable rule:comments rule:comments-indentation rule:line-length
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
18 ### Version numbering:
20 ### - Major structural changes compared to the previous version.
21 ### - Requires all users to update their user configuration to
24 ### - Significant changes in the template within current structure
25 ### (e.g. new mandatory attributes)
26 ### - Requires all users to update their user configuration according
27 ### to the new template (e.g. add new mandatory attributes)
29 ### - Minor changes in template (e.g. new optional attributes or
30 ### changes in possible values, value ranges or default values)
31 ### - Backwards compatible
34 ### Cloud name can consist of lower case letters, digits and dash (-).
35 ### Name must start and end with a letter or a digit.
41 ### Time related configuration
43 ### A list of NTP server IP addresses.
44 ntp_servers: [VALUE1, VALUE2, ...]
46 ### linux time zone name (e.g. Europe/Helsinki or Asia/Shanghai)
49 ### supported values for authentication method of NTP:
50 ### crypto, symmetric, none
53 ### If you are using authenticated NTP you must provide the URL of the keys used for authentication
56 ### User related configuration
58 ### Admin user details
59 admin_user_name: <VALUE>
60 ### Example how to create SHA512 password hash that can be given as
61 ### the admin password:
62 ### python -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))"
63 admin_user_password: <VALUE>
65 ### User details for the initial user (gets user_management_admin role)
66 initial_user_name: <VALUE>
67 initial_user_password: <VALUE>
69 ### For CaaS deployments
70 ### keystone admin users password (at least 8 characters; at least one letter)
71 admin_password: <VALUE>
73 ### Networking configuration
75 ### A list of DNS server IP addresses.
76 ### Max two addresses supported.
79 ### Optional. Default network device mtu.
80 ### Valid value range: 1280 - 9000
81 ### When not set, defaults to 1500
85 ### Optional network mtu
86 ### If not defined default value is used.
91 ### User defined name for network domain
93 ### Network address in CIDR format
99 ### IP address of the gateway for default route
102 ### Range for external IPs
103 ### - First IP address of the range is reserved for vip
104 ### (Public API access)
105 ### - following addresses are reserved for management hosts
106 ### (one address per management hosts)
107 ip_range_start: <VALUE>
108 ip_range_end: <VALUE>
111 ### This configuration is required if there are storage hosts in
112 ### the configuration. This network is used for OSD Replication.
113 #infra_storage_cluster:
114 ### Optional network mtu
115 ### If not defined default value is used.
120 ### User defined name for network domain
122 ### Network address in CIDR format (e.g. 192.168.4.0/26)
128 ### Optional IP range from the CIDR to limit IP addresses to use
129 #ip_range_start: <VALUE>
130 #ip_range_end: <VALUE>
132 ### Optional static routes
134 # - {to: <CIDR>, via: <IP>}
136 ### This network is used for:
137 ### - Internal communication/API
138 ### - SSH between hosts
139 ### - Internal services
140 ### - NTP between hosts
142 ### Optional network mtu
143 ### If not defined default value is used.
148 ### User defined name for network domain
150 ### Network address in CIDR format
151 cidr: 192.168.12.0/26
156 ### Optional IP range from the CIDR to limit IP addresses to use
157 #ip_range_start: <VALUE>
158 #ip_range_end: <VALUE>
160 ### Optional static routes
162 # - {to: 192.168.12.0/22, via: 192.168.12.1}
163 ### Use above structure for all the other network domains
165 #cidr: 192.168.12.64/26
167 #ip_range_start: 192.168.12.68
168 #ip_range_end: 192.168.12.126
170 # - {to: 192.168.12.0/22, via: 192.168.12.65}
172 ### Provider networks
173 ### Provider network to physical interface mapping is done
174 ### in the network profile configuration
176 ### Any number of provider network names
177 #<provider_network_name1>:
178 ### Optional. Set provider network mtu.
179 ### If not defined default value is used.
182 ### Provider network vlan ranges
183 #vlan_ranges: "<VID_START1>:<VID_END1>,<VID_START2>:<VID_END2>,..."
185 ### Use above structure for all the other provider networks
186 #<provider_network_name2>:
189 ### Needed for non-CaaS deployments
191 ### keystone admin user password (at least 8 characters; at least one letter)
192 #admin_password: <VALUE>
194 ### Caas configuration
196 ### This parameter globally sets a maximum allowed writable disk space quota for every container,
197 ### on all caas related hosts. The quota physically forbids any containers from storing data more
198 ### than the allowed size on its own rootfs.
199 ### These ephemeral disks are allocated from the Docker Cinder volume attached to all hosts,
200 ### and as such are limited in size. The quota protects the containers from possible noisy neighbours
201 ### by limiting their maximum consumption, and thus assuring that no one faulty container
202 ### can eat up the disk space of a whole container execution host.
204 docker_size_quota: "2G"
206 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
207 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
208 ### it will be forwarded to the configured address, regardless which sub-domain the query belongs to.
209 ### Please note, that in case the address points out of the infrastructure,
210 ### connectivity between the infrastructure and the external DNS server needs to be separately set-up.
211 #upstream_nameserver: "10.74.3.252"
213 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
214 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
215 ### it might be forwarded to the address set into the "stub_domain_ip" parameter.
216 ### However, forwarding only happens if "stub_domain_name" matches the domain name in the DNS query.
217 ### Please note, that in case the address points out of the infrastructure, connectivity between the
218 ### infrastructure and the external DNS server needs to be separately set-up.
223 ### This parameter, if provided, controls how long a Helm install procedure waits before exiting with a timeout error.
224 ### Value is interpreted in minutes.
225 #helm_operation_timeout: "900"
227 ### The Docker container run-time engine creates a Linux network bridge by default, and provisions
228 ### a /24 IPv4 network on top of it. Even though this bridge is not used within the CaaS subsystem,
229 ### the existence of this bridge is not configurable.
230 ### However, in certain customer environments the default IPv4 network of this bridge can collide with
231 ### real customer networks. To avoid IP collision issues in such cases, the application operator can globally set
232 ### the Docker bridge CIDRs of all hosts via this parameter.
233 #docker0_cidr: "172.17.0.1/16"
235 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
236 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
237 ### This CA certificate can be configured by setting its encrypted format into this configuration parameter.
238 ### Due to CBAM limitation the value of this parameter shall be provided as a one-element list in JSON format
239 ### e.g. ["U2FsdGVkX1+iaWyYk3W01IFpfVdughR5aDKo2NpcBw2USt.."]
240 encrypted_ca: '["<ENCRYPTED_CA>"]'
242 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
243 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
244 ### This CA certificate can be configured by setting its encrypted format into the "encrypted_CA" configuration parameter.
245 ### The key which can be used to decrypt this CA certificate shall be configured into this configuration parameter, but also encrypted.
246 ### This key shall be encrypted by the super-secret, static key, known only by infrastructure developers and cloud operators.
247 ### Due to CBAM limitation the value of this parameter shall be provided as a one-element list in JSON format
248 ### e.g. ["U2FsdGVkX1+WlNST+W.."]
249 encrypted_ca_key: '["<ENCRYPTED_CA_KEY>"]'
251 ### This parameter defines the DNS domain served by the REC DNS server; for example,
252 ### in-cluster Kubernetes Services all belong to this domain. DNS queries
253 ### outside of this domain are either rejected, or forwarded to a configured upstream DNS server (if any).
254 ### The default value is: rec.io
255 #dns_domain: "<VALUE>"
257 ### Storage configuration
260 ### Configuration of supported storage backends.
261 ### At least one backend must be configured and only one backend can be enabled.
262 ### If more than one backend is configured then one should be enabled (enabled:true)
263 ### and the others should be disabled (enabled: false).
266 ### Ceph can be enabled only in a multi-node configuration.
267 #enabled: <true/false>
269 ### The OSD replica count.
270 ### The number of replicas for objects in the pool.
271 ### Valid value range for any production environment: 2 - 3
272 ### (for testing purposes only, in environments with very limited
273 ### storage resource, value 1 can be used as well)
274 ### Required if there are ceph nodes.
275 #osd_pool_default_size: <VALUE>
280 ### Users can define multiple network profiles depending on the hardware.
282 ### Compulsory if bonding interfaces used for infra networks.
283 ### Bonding options for linux bonding interfaces used for infra
285 ### Supported options: "mode=lacp" and "mode=active-backup"
286 ### In "mode=lacp" both NICs are active simultaneously.
287 ### In "mode=active-backup" only one slave in the bond is active and
288 ### the other slave becomes active only if the active slave fails.
289 #linux_bonding_options: <VALUE>
291 ### Optional bonding interfaces
293 ### Any number of bonding interface names.
294 ### Bonding interface name syntax must be bond[n]
295 ### where n is a number.
296 ### Numbers in bonding interface names must be
297 ### consecutive natural numbers starting from 0
298 ### (bond0, bond1, bond2, ...)
300 ### Value is a list of at least two physical interface names
301 ### (e.g. bond0: [eno3, eno4])
302 #<bonding interface name>: [<VALUE1>, <VALUE2>, ...]
304 ### Interface-subnet mapping
305 ### Any number of (name: value) pairs to map interfaces
306 ### (bonding or physical interface name) to subnets
307 ### Value is list of subnets
308 ### (e.g. bond0: [infra_internal, infra_storage_cluster] or
309 ### eno3: [infra_external])
310 ### An interface can be mapped to at most one non-vlan subnet
311 interface_net_mapping:
312 #<interface_name>: [<VALUE1>, <VALUE2>, ...]
314 ### Optional provider network interface
315 #provider_network_interfaces:
316 ### Provider network physical interface.
317 ### Either Ethernet or bonding interface.
319 ### Provider networks on this interface.
320 ### Provider networks must be defined also in the networking:
321 ### provider_networks: configuration.
322 #provider_networks: [<VALUE1>,<VALUE2>,...]
323 ### Use above structure for all the provider network interfaces
328 ### Optional SR-IOV provider networks
329 #sriov_provider_networks:
330 ### Provider network name.
331 ### Must be defined also in the
332 ### networking: provider_networks: configuration.
333 #<provider_network_name1>:
334 ### SR-IOV physical function interfaces
335 ### Multiple Ethernet interfaces can be mapped to implement one
337 ### SR-IOV interfaces can be used also for the infra networks
338 ### but only if network card type supports that
339 ### (for example Mellanox ConnectX-4 Lx
340 ### does and Intel Niantic doesn't). Another restriction is that
341 ### bond option cannot be "mode=lacp" if SR-IOV interfaces are
342 ### also bonding slave interfaces.
343 #interfaces: [<VALUE1>, <VALUE2>, ...]
345 ### Optional VF count per physical PF interface
346 ### If this parameter is not defined, default is to create
347 ### maximum supported amount of VF interfaces. In case of
348 ### Mellanox NIC (mlx5_core driver) given VF count will be
349 ### configured to the NIC HW as a maximum VF count.
352 ### Optional VF trusted mode setting
353 ### If enabled, PF can accept some privileged operations from
354 ### the VF. See the NIC manufacturer documentation for more
357 #trusted: [true|false]
358 ### Use above structure for all the SR-IOV provider networks in
360 #<provider_network_name2>
363 ### Performance profiles
364 performance_profiles:
366 ### The parameters specified here are affected by the type
367 ### of network profile selected for the node as follows:
368 ### The following types are supported:
369 ### SR-IOV: no mandatory parameters, but following can be used:
370 ### - default_hugepagesz
374 ### Configuration for huge page usage.
375 ### Notice: Huge page values must be in balance with RAM available
378 ### Default huge page size. Valid values are 2M and 1G.
379 #default_hugepagesz: <VALUE>
380 ### Huge page size selection parameter. Valid values are 2M and 1G.
382 ### The number of allocated persistent huge pages
385 ### Host CPU allocations.
386 ### Any host CPUs that are not allocated for some specific purpose
387 ### here will be automatically assigned by the system:
388 ### - All remaining CPUs are allocated for the host platform.
390 ### Optional. Allocate CPUs for the host platform.
391 ### The configured counts determine the number of full CPU cores to
392 ### allocate from each specified NUMA node. If hyperthreading is
393 ### enabled, all sibling threads are automatically grouped together
394 ### and counted as one CPU core. The actual configurable range
395 ### depends on target hardware CPU topology and desired performance
397 ### Notice: The host platform must always have at least one CPU
398 ### core from NUMA node 0.
405 ### The storage_profiles section name is part of mandatory configuration.
407 ### There must always be at least one profile defined when ceph or lvm
408 ### have been configured and enabled as the backend in the storage section.
409 ### This profile represents the enabled backend in question.
411 ### In addition the user can optionally configure storage instance profiles
415 ### Name of the storage backend. The allowed values for the backend are
421 ### Backend specific attributes - see examples of supported backend
422 ### specific attributes in the following storage profile templates.
425 #ceph_backend_profile:
427 ### A storage profile for ceph backend. This storage profile is linked
428 ### to all of the storage hosts. The ceph profile is possible only with
429 ### a multihost configuration with three (3) management hosts.
434 ### Number of devices that should be used as osd disks in one node.
435 ### This is a mandatory attribute for ceph storage hosts.
436 ### Max number of ceph osd disks is 3.
437 #nr_of_ceph_osd_disks: <VALUE>
440 ### The share ratio between the Openstack & CaaS subsystems for
441 ### the available Ceph storage. Expected to be in ratio format (A:B),
442 ### where the first number is for Openstack, the second one is for CaaS subsystem.
443 ### Always quote the value! Default value is "1:0".
444 #ceph_pg_openstack_caas_share_ratio: "<VALUE>"
448 ### A storage profile to create bare lvm volumes.
450 ### This profile can be used to create an LVM volume that will be
451 ### available under the defined directory for any further use.
453 ### This profile is mandatory for caas_worker hosts and should be
454 ### mounted to /var/lib/docker.
459 ### This parameter defines which partitions are to be used
460 ### for the instance volume group.
461 #lvm_instance_storage_partitions: [<VALUE1>, <VALUE2>, ...]
464 ### This parameter defines how much space bare_lvm should take.
466 ### Note that this option is left for compatibility reasons; the actual
467 ### value is dynamically calculated.
469 #bare_lvm_storage_percentage: <VALUE>
472 ### This parameter contains the name for the created LVM volume.
476 ### This parameter contains the directory where to mount
477 ### the backend of this profile.
481 ### This parameter contains the mount options used to mount
482 ### the backend. The format must be a valid fstab format.
483 ### By default it is empty.
484 #mount_options: <VALUE>
487 ### The value of this parameter is used to protect the entire GRUB 2 menu structure of all the infrastructure nodes.
488 ### The configured value should be a properly salted PBKDF2 (Password-Based Key Derivation Function 2) hash.
489 ### Interactive tool "grub2-mkpasswd-pbkdf2" can be used to create the hash.
490 ### Operators will be only able to make changes in the GRUB menu, if the
491 ### hashed version of the typed-in password matches with the value of this parameter.
493 #grub2_password: "<VALUE>"
494 ### User lockout parameters are set with failed_login_attempts (default is 5)
495 ### and lockout_time (default is 300 seconds (5 minutes))
496 #failed_login_attempts: <VALUE>
497 #lockout_time: <VALUE>
502 ### The service profiles for this node. Valid values are the following:
503 ### management/base/storage/caas_master/caas_worker
504 ### Currently supported service profile combinations:
505 ### 1 Any permutations of: management/base/storage e.g: [ management, storage ]
506 ### 2 Either or both [management, caas_master] e.g.: [ management, caas_master ]
507 ### 3 caas_worker can't be combined with any other profile: e.g.: [ caas_worker ]
508 service_profiles: [<VALUE1>, <VALUE2>, ...]
510 ### The network profiles for this node, the value used in the list
511 ### should match a profile from the network_profiles section.
512 ### Only one network profile per host supported at the moment.
513 network_profiles: [profile1]
515 ### The storage profiles for this node, the value used in the list
516 ### should match a profile from the storage_profiles section.
517 #storage_profiles: [profile1]
519 ### The performance profiles for this node, the value used in the list
520 ### should match a profile from the performance_profiles section.
521 ### Only one performance profile per host supported at the moment.
522 #performance_profiles: [profile1]
524 ### The kubernetes label set of the node, you can define an arbitrary set of key-value pairs.
525 ### These key-value pairs will be provisioned to the corresponding
526 ### Kubernetes node object as kubernetes labels.
527 ### Optional parameter, only interpreted when the node has a CaaS subsystem related service profile.
528 ### For any other node this attribute will be silently ignored.
529 ### The keys under "labels" can be anything, except: 'name', 'nodetype', 'nodeindex', 'nodename'
530 ### These labels are reserved for infrastructure usage
532 # type: "performance"
534 # hyperthreading: "off"
537 ### Network domain for this node
538 ### Value should match some network domain in networking section.
539 network_domain: rack-1
541 ### HW management (e.g. IPMI or iLO) address and credentials
547 ### Optional parameter needed for virtual deployment to identify the
548 ### nodes by the MAC address of the provisioning interface.
549 #mgmt_mac: [<VALUE1>, <VALUE2>, ...]