2 # yamllint disable rule:comments rule:comments-indentation rule:line-length
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
18 ### Version numbering:
20 ### - Major structural changes compared to the previous version.
21 ### - Requires all users to update their user configuration to
24 ### - Significant changes in the template within current structure
25 ### (e.g. new mandatory attributes)
26 ### - Requires all users to update their user configuration according
27 ### to the new template (e.g. add new mandatory attributes)
29 ### - Minor changes in template (e.g. new optional attributes or
30 ### changes in possible values, value ranges or default values)
31 ### - Backwards compatible
34 ### Cloud name can consist of lower case letters, digits and dash (-).
35 ### Name must start and end with a letter or a digit.
41 ### Time related configuration
43 ### A list of NTP server IP addresses.
44 ntp_servers: [VALUE1, VALUE2, ...]
46 ### linux time zone name (e.g. Europe/Helsinki or Asia/Shanghai)
49 ### supported values for authentication method of NTP:
50 ### crypto, symmetric, none
53 ### If you are using authenticated NTP you must provide the url of the keys used for authentication
56 ### User related configuration
58 ### Admin user details
59 admin_user_name: <VALUE>
60 ### Example how to create SHA512 password hash that can be given as
61 ### the admin password:
62 ### python -c "from passlib.hash import sha512_crypt; import getpass; print sha512_crypt.using(rounds=5000).hash(getpass.getpass())"
63 admin_user_password: <VALUE>
65 # Authorized public keys for the admin user
66 #admin_user_authorized_keys: []
68 ### User details for the initial user (gets user_management_admin role)
69 initial_user_name: <VALUE>
70 initial_user_password: <VALUE>
72 ### For CaaS deployments
73 ### keystone admin users password (at least 8 characters; at least one letter)
74 admin_password: <VALUE>
76 ### Networking configuration
78 ### A list of DNS server IP addresses.
79 ### Max two addresses supported.
82 ### Optional. Default network device mtu.
83 ### Valid value range: 1280 - 9000
84 ### When not set, defaults to 1500
88 ### Optional network mtu
89 ### If not defined default value is used.
94 ### User defined name for network domain
96 ### Network address in CIDR format
102 ### IP address of the gateway for default route
105 ### Range for external IPs
106 ### - First IP address of the range is reserved for vip
107 ### (Public API access)
108 ### - following addresses are reserved for management hosts
109 ### (one address per management hosts)
110 ip_range_start: <VALUE>
111 ip_range_end: <VALUE>
114 ### This configuration is required if there are storage hosts in
115 ### the configuration. This network is used for OSD Replication.
116 #infra_storage_cluster:
117 ### Optional network mtu
118 ### If not defined default value is used.
123 ### User defined name for network domain
125 ### Network address in CIDR format (e.g. 192.168.4.0/26)
131 ### Optional IP range from the CIDR to limit IP addresses to use
132 #ip_range_start: <VALUE>
133 #ip_range_end: <VALUE>
135 ### Optional static routes
137 # - {to: <CIDR>, via: <IP>}
139 ### This network is used for:
140 ### - Internal communication/API
141 ### - SSH between hosts
142 ### - Internal services
143 ### - NTP between hosts
145 ### Optional network mtu
146 ### If not defined default value is used.
151 ### User defined name for network domain
153 ### Network address in CIDR format
154 cidr: 192.168.12.0/26
159 ### Optional IP range from the CIDR to limit IP addresses to use
160 #ip_range_start: <VALUE>
161 #ip_range_end: <VALUE>
163 ### Optional static routes
165 # - {to: 192.168.12.0/22, via: 192.168.12.1}
166 ### Use above structure for all the other network domains
168 #cidr: 192.168.12.64/26
170 #ip_range_start: 192.168.12.68
171 #ip_range_end: 192.168.12.126
173 # - {to: 192.168.12.0/22, via: 192.168.12.65}
175 ### Provider networks
176 ### Provider network to physical interface mapping is done
177 ### in the network profile configuration
179 ### Any number of provider network names
180 #<provider_network_name1>:
181 ### Optional. Set provider network mtu.
182 ### If not defined default value is used.
185 ### Provider network vlan ranges
186 #vlan_ranges: "<VID_START1>:<VID_END1>,<VID_START2>:<VID_END2>,..."
188 ### Use above structure for all the other provider networks
189 #<provider_network_name2>:
192 ### Needed for non-CaaS deployments
194 ### keystone admin user password (at least 8 characters; at least one letter)
195 #admin_password: <VALUE>
197 ### Caas configuration
199 ### This parameter globally sets a maximum allowed writable disk space quota for every container,
200 ### on all caas related hosts. The quota physically forbids any containers from storing data more
201 ### than the allowed size on its own rootfs.
202 ### These ephemeral disks are allocated from the Docker Cinder volume attached to all hosts,
203 ### and as such are limited in size. The quota protects the containers from possible noisy neighbours
204 ### by limiting their maximum consumption, and thus ensuring that no single faulty container
205 ### can eat up the disk space of a whole container execution host.
207 docker_size_quota: "2G"
209 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
210 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
211 ### it will be forwarded to the configured address, regardless which sub-domain the query belongs to.
212 ### Please note, that in case the address points out of the infrastructure,
213 ### connectivity between the infrastructure and the external DNS server needs to be separately set-up.
214 #upstream_nameserver: "10.74.3.252"
216 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
217 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
218 ### it might be forwarded to the address set into the "stub_domain_ip" parameter.
219 ### However, forwarding only happens if "stub_domain_name" matches the domain name in the DNS query.
220 ### Please note, that in case the address points out of the infrastructure, connectivity between the
221 ### infrastructure and the external DNS server needs to be separately set-up.
226 ### This parameter, if provided, controls how long a Helm install procedure waits before exiting with a timeout error.
227 ### Value is interpreted in minutes.
228 #helm_operation_timeout: "900"
230 ### The Docker container run-time engine creates a Linux network bridge by default, and provisions
231 ### a /24 IPv4 network on top of it. Even though this bridge is not used within CaaS subsystem,
232 ### the existence of this bridge is not configurable.
233 ### However, in certain customer environments the default IPv4 network of this bridge can collide with
234 ### real customer networks. To avoid IP collision issues in such cases, the application operator can globally set
235 ### the Docker bridge CIDRs of all host via this parameter.
236 #docker0_cidr: "172.17.0.1/16"
238 ### This parameter is used to set the overlay CIDR of the default network for containers, so pods can communicate
239 ### over this subnet and Kubernetes services are available here also.
240 ### The parameter can be used to make sure the CIDR of this network does not overlap with any customer
241 ### specific provider network's
242 #oam_cidr: "10.244.0.0/16"
244 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
245 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
246 ### This CA certificate can be configured by setting its encrypted format into this configuration parameter.
247 ### Due to CBAM limitation the value of this parameter shall be provided as a one-element list in JSON format
248 ### e.g. ["U2FsdGVkX1+iaWyYk3W01IFpfVdughR5aDKo2NpcBw2USt.."]
249 encrypted_ca: '["<ENCRYPTED_CA>"]'
251 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
252 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
253 ### This CA certificate can be configured by setting its encrypted format into the "encrypted_ca" configuration parameter.
254 ### The key which can be used to decrypt this CA certificate shall be configured into this configuration parameter, but also encrypted.
255 ### This key shall be encrypted by the super-secret, static key, known only by infrastructure developers, and cloud operators.
256 ### Due to CBAM limitation the value of this parameter shall be provided as a one-element list in JSON format
257 ### e.g. ["U2FsdGVkX1+WlNST+W.."]
258 encrypted_ca_key: '["<ENCRYPTED_CA_KEY>"]'
260 ### This parameter defines the DNS domain served by the REC DNS server; for example,
261 ### in-cluster Kubernetes Services all belong to this domain. DNS queries
262 ### outside of this domain are either rejected, or forwarded to a configured upstream DNS server (if any).
263 ### The default value is: rec.io
264 #dns_domain: "<VALUE>"
267 ### This list contains all provider networks dedicated to be used by CaaS tenant users.
268 ### These provider networks need to be bound homogeneously to all CaaS hosts and the
269 ### provider network type must be caas.
270 ### SR-IOV provider networks also supported.
271 #tenant_networks: ["tenant_net1", "tenant_net2"]
273 ### Storage configuration
276 ### Configuration of supported storage backends.
277 ### At least one backend must be configured and only one backend can be enabled.
278 ### If more than one backend is configured then one should be enabled (enabled:true)
279 ### and the others should be disabled (enabled: false).
282 ### The ceph can be enabled only in a multi node configuration.
283 #enabled: <true/false>
285 ### The OSD replica count.
286 ### The number of replicas for objects in the pool.
287 ### Valid value range for any production environment: 2 - 3
288 ### (for testing purposes only, in environments with very limited
289 ### storage resource, value 1 can be used as well)
290 ### Required if there are ceph nodes.
291 #osd_pool_default_size: <VALUE>
296 ### Users can define multiple network profiles depending on the hardware.
298 ### Compulsory if bonding interfaces used for infra networks.
299 ### Bonding options for linux bonding interfaces used for infra
301 ### Supported options: "mode=lacp" and "mode=active-backup"
302 ### In "mode=lacp" both nics are active simultaniously.
303 ### In "mode=active-backup" only one slave in the bond is active and
304 ### the other slave becomes active only if the active slave fails.
305 #linux_bonding_options: <VALUE>
307 ### Optional bonding interfaces
309 ### Any number of bonding interface names.
310 ### Bonding interface name syntax must be bond[n]
311 ### where n is a number.
312 ### Numbers in bonding interface names must be
313 ### consecutive natural numbers starting from 0
314 ### (bond0, bond1, bond2, ...)
316 ### Value is a list of at least two physical interface names
317 ### (e.g. bond0: [eno3, eno4])
318 #<bonding interface name>: [<VALUE1>, <VALUE2>, ...]
320 ### Interface-subnet mapping
321 ### Any number of (name: value) pairs to map interfaces
322 ### (bonding or physical interface name) to subnets
323 ### Value is list of subnets
324 ### (e.g. bond0: [infra_internal, infra_storage_cluster] or
325 ### eno3: [infra_external])
326 ### An interface can be mapped to at most one non-vlan subnet
327 interface_net_mapping:
328 #<interface_name>: [<VALUE1>, <VALUE2>, ...]
330 ### Optional provider network interface
331 #provider_network_interfaces:
332 ### Provider network physical interface.
333 ### Either Ethernet or bonding interface.
335 ### Optional provider network type.
339 ### Containers as a Service (CaaS) provider network
341 ### CaaS bond interfaces are configured as a Linux bond interfaces.
344 ### Provider networks on this interface.
345 ### Provider networks must be defined also in the networking:
346 ### provider_networks: configuration.
347 #provider_networks: [<VALUE1>,<VALUE2>,...]
348 ### Use above structure for all the provider network interfaces
353 ### Optional SR-IOV provider networks
354 #sriov_provider_networks:
355 ### Provider network name.
356 ### Must be defined also in the
357 ### networking: provider_networks: configuration.
358 #<provider_network_name1>:
359 ### SR-IOV physical function interfaces
360 ### Multiple Ethernet interfaces can be mapped to implement one
362 ### SR-IOV interfaces can be used also for the infra networks
363 ### but only if network card type supports that
364 ### (for example Mellanox ConnectX-4 Lx
365 ### does and Intel Niantic doesn't). Another restriction is that
366 ### bond option cannot be "mode=lacp" if SR-IOV interfaces are
367 ### also bonding slave interfaces.
368 #interfaces: [<VALUE1>, <VALUE2>, ...]
370 ### Optional VF count per physical PF interface
371 ### If this parameter is not defined, default is to create
372 ### maximum supported amount of VF interfaces. In case of
373 ### Mellanox NIC (mlx5_core driver) given VF count will be
374 ### configured to the NIC HW as a maximum VF count.
377 ### Optional VF trusted mode setting
378 ### If enabled, PF can accept some privileged operations from
379 ### the VF. See the NIC manufacturer documentation for more
382 #trusted: [true|false]
384 ### Optional provider network type
385 ### - caas: configure as CaaS SR-IOV cluster network
388 ### Use above structure for all the SR-IOV provider networks in
390 #<provider_network_name2>
393 ### Performance profiles
394 performance_profiles:
396 ### The parameters specified here are affected by the type
397 ### of network profile selected for the node as follows:
398 ### The following types are supported:
399 ### SR-IOV: no mandatory parameters, but following can be used:
400 ### - default_hugepagesz
404 ### Configuration for huge page usage.
405 ### Notice: Huge page values must be in balance with RAM available
408 ### Default huge page size. Valid values are 2M and 1G.
409 #default_hugepagesz: <VALUE>
410 ### Huge page size selection parameter. Valid values are 2M and 1G.
412 ### The number of allocated persistent huge pages
415 ### Host CPU allocations.
416 ### Any host CPUs that are not allocated for some specific purpose
417 ### here will be automatically assigned by the system:
418 ### - If the node contains 'caas' in its service_profiles remaining
419 ### CPUs are allocated for CaaS CPU pools. The remaining CaaS CPUs are
420 ### allocated for default container execution.
421 ### - Any CPUs that don't fall into the above categories are allocated
422 ### for the host platform.
424 ### Optional. Allocate CPUs for the host platform.
425 ### The configured counts determine the number of full CPU cores to
426 ### allocate from each specified NUMA node. If hyperthreading is
427 ### enabled, all sibling threads are automatically grouped together
428 ### and counted as one CPU core. The actual configurable range
429 ### depends on target hardware CPU topology and desired performance
431 ### Notice: The host platform must always have at least one CPU
432 ### core from NUMA node 0.
437 ### Optional. Performance tuning.
438 ### Valid values are low_latency and standard (default).
439 ### Note that low_latency mode will turn off power saving, etc
440 #tuning: <low_latency|standard>
442 ### Optional. Create CPU pools in CaaS CPU manager.
443 ### Type of this parameter is dictionary, consisting of the following attributes:
444 ### - exclusive_pool_percentage
445 ### - shared_pool_percentage
446 ### Attributes are optional, but at least one of them shall be defined
447 ### if caas_cpu_pools is defined. The sum of values can't exceed 100.
448 ### Minimum allocation is 1 CPU, which means anything greater than 0
449 ### ensures 1 CPU allocation.
451 #exclusive_pool_percentage: <VALUE>
452 #shared_pool_percentage: <VALUE>
456 ### The storage_profiles section name is part of mandatory configuration.
458 ### There must always be at least one profile defined when ceph or lvm
459 ### have been configured and enabled as the backend in the storage section.
460 ### This profile represents the enabled backend in question.
462 ### In addition the user can optionally configure storage instance profiles
466 ### Name of the storage backend. The allowed values for the backend are
472 ### Backend specific attributes - see examples of supported backend
473 ### specific attributes in the following storage profile templates.
476 #ceph_backend_profile:
478 ### A storage profile for ceph backend. This storage profile is linked
479 ### to all of the storage hosts. The ceph profile is possible only with
480 ### a multihost configuration with three (3) management hosts.
485 ### Number of devices that should be used as osd disks in one node.
486 ### This is a mandatory attribute for ceph storage hosts.
487 ### Max number of ceph osd disks is 3.
488 #nr_of_ceph_osd_disks: <VALUE>
491 ### The share ratio between the Openstack & CaaS subsystems for
492 ### the available Ceph storage. Expected to be in ratio format (A:B),
493 ### where the first number is for Openstack, the second one is for CaaS subsystem.
494 ### Always quote the value! Default value is "1:0".
495 #ceph_pg_openstack_caas_share_ratio: "<VALUE>"
499 ### A storage profile to create bare lvm volumes.
501 ### This profile can be used to create an LVM volume that will be
502 ### available under the defined directory for any further use.
504 ### This profile is mandatory for caas_worker hosts and should be
505 ### mounted to /var/lib/docker.
510 ### This parameter defines which partitions are to be used
511 ### for instance volume group.
512 #lvm_instance_storage_partitions: [<VALUE1>, <VALUE2>, ...]
515 ### This parameter defines how much space bare_lvm should take
517 ### Note that this option left for compatibility reasons, actual value
518 ### dynamically calculated.
520 #bare_lvm_storage_percentage: <VALUE>
523 ### This parameter contains the name for the created LVM volume.
527 ### The value of this parameter is used to protect the entire GRUB 2 menu structure of all the infrastructure nodes.
528 ### The configured value should be a properly salted PBKDF2 (Password-Based Key Derivation Function 2) hash.
529 ### Interactive tool "grub2-mkpasswd-pbkdf2" can be used to create the hash.
530 ### Operators will be only able to make changes in the GRUB menu, if the
531 ### hashed version of the typed-in password matches with the value of this parameter.
533 #grub2_password: "<VALUE>"
534 ### User lockout parameters are set with failed_login_attempts (default is 5)
535 ### and lockout_time (default is 300 seconds (5 minutes))
536 #failed_login_attempts: <VALUE>
537 #lockout_time: <VALUE>
542 ### The service profiles for this node. Valid values are the following:
543 ### management/base/storage/caas_master/caas_worker
544 ### Currently supported service profile combinations:
545 ### 1 Any permutations of: management/base/storage e.g: [ management, storage ]
546 ### 2 Either or both [management, caas_master] e.g.: [ management, caas_master ]
547 ### 3 caas_worker can't be combined with any other profile: e.g.: [ caas_worker ]
548 service_profiles: [<VALUE1>, <VALUE2>, ...]
550 ### The network profiles for this node, the value used in the list
551 ### should match a profile from the network_profiles section.
552 ### Only one network profile per host supported at the moment.
553 network_profiles: [profile1]
555 ### The storage profiles for this node, the value used in the list
556 ### should match a profile from the storage_profiles section.
557 #storage_profiles: [profile1]
559 ### The performance profiles for this node, the value used in the list
560 ### should match a profile from the performance_profiles section.
561 ### Only one performance profile per host supported at the moment.
562 #performance_profiles: [profile1]
564 ### The kubernetes label set of the node, you can define an arbitrary set of key-value pairs.
565 ### These key-value pairs will be provisioned to the corresponding
566 ### Kubernetes node object as kubernetes labels.
567 ### Optional parameter, only interpreted when the node has a CaaS subsystem related service profile.
568 ### For any other node this attribute will be silently ignored.
569 ### The keys under "labels" can be anything, except: 'name', 'nodetype', 'nodeindex', 'nodename'
570 ### These labels are reserved for infrastructure usage
572 # type: "performance"
574 # hyperthreading: "off"
577 ### Network domain for this node
578 ### Value should match some network domain in networking section.
579 network_domain: rack-1
581 ### HW management (e.g. IPMI or iLO) address and credentials
586 # Optional: the IPMI privilege level to request.
587 # Typical values include 'USER', 'OPERATOR', 'ADMINISTRATOR'
588 # default is 'ADMINISTRATOR' if unspecified.
589 # priv_level: <VALUE>
590 ### Optional parameter needed for virtual deployment to identify the
591 ### nodes; the MAC address for the provisioning interface
592 #mgmt_mac: [<VALUE1>, <VALUE2>, ...]