2 # yamllint disable rule:comments rule:comments-indentation rule:line-length
6 # Licensed under the Apache License, Version 2.0 (the "License");
7 # you may not use this file except in compliance with the License.
8 # You may obtain a copy of the License at
10 # http://www.apache.org/licenses/LICENSE-2.0
12 # Unless required by applicable law or agreed to in writing, software
13 # distributed under the License is distributed on an "AS IS" BASIS,
14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 # See the License for the specific language governing permissions and
16 # limitations under the License.
18 ### Version numbering:
20 ### - Major structural changes compared to the previous version.
21 ### - Requires all users to update their user configuration to
24 ### - Significant changes in the template within current structure
25 ### (e.g. new mandatory attributes)
26 ### - Requires all users to update their user configuration according
27 ### to the new template (e.g. add new mandatory attributes)
29 ### - Minor changes in template (e.g. new optional attributes or
30 ### changes in possible values, value ranges or default values)
31 ### - Backwards compatible
34 ### Cloud name can consist of lower case letters, digits and dash (-).
35 ### Name must start and end with a letter or a digit.
41 ### Time related configuration
43 ### A list of NTP server IP addresses.
44 ntp_servers: [VALUE1, VALUE2, ...]
46 ### linux time zone name (e.g. Europe/Helsinki or Asia/Shanghai)
49 ### supported values for authentication method of NTP:
50 ### crypto, symmetric, none
53 ### If you are using authenticated NTP you must provide the url of the keys used for authentication
56 ### User related configuration
58 ### Admin user details
59 admin_user_name: <VALUE>
60 ### Example how to create SHA512 password hash that can be given as
61 ### the admin password:
62 ### python3 -c "from passlib.hash import sha512_crypt; import getpass; print(sha512_crypt.using(rounds=5000).hash(getpass.getpass()))"
63 admin_user_password: <VALUE>
65 # Authorized public keys for the admin user
66 #admin_user_authorized_keys: []
68 ### User details for the initial user (gets user_management_admin role)
69 initial_user_name: <VALUE>
70 initial_user_password: <VALUE>
72 ### For CaaS deployments
73 ### keystone admin users password (at least 8 characters; at least one letter)
74 admin_password: <VALUE>
76 ### Networking configuration
78 ### A list of DNS server IP addresses.
79 ### Max two addresses supported.
82 ### Optional. Default network device mtu.
83 ### Valid value range: 1280 - 9000
84 ### When not set, defaults to 1500
88 ### Optional network mtu
89 ### If not defined default value is used.
94 ### User defined name for network domain
96 ### Network address in CIDR format
102 ### IP address of the gateway for default route
105 ### Range for external IPs
106 ### - First IP address of the range is reserved for vip
107 ### (Public API access)
109 ### - following addresses are reserved for management hosts
109 ### (one address per management hosts)
110 ip_range_start: <VALUE>
111 ip_range_end: <VALUE>
114 ### This configuration is required if there are storage hosts in
115 ### the configuration. This network is used for OSD Replication.
116 #infra_storage_cluster:
117 ### Optional network mtu
118 ### If not defined default value is used.
123 ### User defined name for network domain
125 ### Network address in CIDR format (e.g. 192.168.4.0/26)
131 ### Optional IP range from the CIDR to limit IP addresses to use
132 #ip_range_start: <VALUE>
133 #ip_range_end: <VALUE>
135 ### Optional static routes
137 # - {to: <CIDR>, via: <IP>}
139 ### This network is used for:
140 ### - Internal communication/API
141 ### - SSH between hosts
142 ### - Internal services
143 ### - NTP between hosts
145 ### Optional network mtu
146 ### If not defined default value is used.
151 ### User defined name for network domain
153 ### Network address in CIDR format
154 cidr: 192.168.12.0/26
159 ### Optional IP range from the CIDR to limit IP addresses to use
160 #ip_range_start: <VALUE>
161 #ip_range_end: <VALUE>
163 ### Optional static routes
165 # - {to: 192.168.12.0/22, via: 192.168.12.1}
166 ### Use above structure for all the other network domains
168 #cidr: 192.168.12.64/26
170 #ip_range_start: 192.168.12.68
171 #ip_range_end: 192.168.12.126
173 # - {to: 192.168.12.0/22, via: 192.168.12.65}
175 ### Provider networks
176 ### Provider network to physical interface mapping is done
177 ### in the network profile configuration
179 ### Any number of provider network names
180 #<provider_network_name1>:
181 ### Optional. Set provider network mtu.
182 ### If not defined default value is used.
185 ### Provider network vlan ranges
186 #vlan_ranges: "<VID_START1>:<VID_END1>,<VID_START2>:<VID_END2>,..."
188 ### Use above structure for all the other provider networks
189 #<provider_network_name2>:
192 ### Needed for non-CaaS deployments
194 ### keystone admin user password (at least 8 characters; at least one letter)
195 #admin_password: <VALUE>
197 ### CaaS configuration
199 ### This parameter globally sets a maximum allowed writable disk space quota for every container,
200 ### on all caas related hosts. The quota physically forbids any containers from storing data more
201 ### than the allowed size on its own rootfs.
202 ### These ephemeral disks are allocated from the Docker Cinder volume attached to all hosts,
203 ### and as such are limited in size. The quota protects the containers from possible noisy neighbours
204 ### by limiting their maximum consumption, and thus assuring that no one faulty container
205 ### can eat up the disk space of a whole container execution host.
207 docker_size_quota: "2G"
209 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
210 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
211 ### it will be forwarded to the configured address, regardless which sub-domain the query belongs to.
212 ### Please note, that in case the address points out of the infrastructure,
213 ### connectivity between the infrastructure and the external DNS server needs to be separately set-up.
214 #upstream_nameserver: "10.74.3.252"
216 ### This parameter, if provided, will be set into the configuration of the CaaS cluster's
217 ### internal DNS server's configuration. Whenever a DNS query cannot be served by the default server,
218 ### it might be forwarded to the address set into the "stub_domain_ip" parameter.
219 ### However, forwarding only happens if "stub_domain_name" matches the domain name in the DNS query.
220 ### Please note, that in case the address points out of the infrastructure, connectivity between the
221 ### infrastructure and the external DNS server needs to be separately set-up.
226 ### This parameter, if provided, controls how long a Helm install procedure waits before exiting with a timeout error.
227 ### Value is interpreted in minutes.
228 #helm_operation_timeout: "900"
230 ### The Docker container run-time engine creates a Linux network bridge by default, and provisions
231 ### a /24 IPv4 network on top of it. Even though this bridge is not used within CaaS subsytem,
232 ### the existence of this bridge is not configurable.
233 ### However, in certain customer environments the default IPv4 network of this bridge can collide with
234 ### real customer networks. To avoid IP collision issues in such cases, the application operator can globally set
235 ### the Docker bridge CIDRs of all host via this parameter.
236 #docker0_cidr: "172.17.0.1/16"
238 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
239 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
240 ### This CA certificate can be configured by setting its encrypted format into this configuration parameter.
241 ### Due to CBAM limitation the value of this parameters shall be provided as a one-element list in JSON format
242 ### e.g. ["U2FsdGVkX1+iaWyYk3W01IFpfVdughR5aDKo2NpcBw2USt.."]
243 encrypted_ca: '["<ENCRYPTED_CA>"]'
245 ### Mandatory parameter. All the infrastructure's HTTP servers are secured with TLS.
246 ### The certificates of the servers are created in infrastructure deployment time, and are signed by an externally provided CA certificate.
247 ### This CA certificate can be configured by setting its encrypted format into the "encrypted_CA" configuration parameter.
248 ### The key which can be used to decrypt this CA certificate shall be configured into this configuration parameter, but also encrypted.
249 ### This key shall be encrypted by the super-secret, static key, known only by infrastructure developers and cloud operators.
250 ### Due to CBAM limitation the value of this parameters shall be provided as a one-element list in JSON format
251 ### e.g. ["U2FsdGVkX1+WlNST+W.."]
252 encrypted_ca_key: '["<ENCRYPTED_CA_KEY>"]'
254 ### This parameter defines the DNS domain served by the REC DNS server for example
255 ### in-cluster Kubernetes Services all belongs to this domain DNS queries.
256 ### Outside of this domain are either rejected, or forwarded to a configured upstream DNS server (if, any).
257 ### The default value is: rec.io
258 #dns_domain: "<VALUE>"
261 ### This list contains all provider networks dedicated to be used by CaaS tenant users.
262 ### These provider networks need to be bound homogeneously to all CaaS hosts, and the
263 ### provider network type must be caas.
264 ### SR-IOV provider networks also supported.
265 #tenant_networks: ["tenant_net1", "tenant_net2"]
267 ### Storage configuration
270 ### Configuration of supported storage backends.
271 ### At least one backend must be configured and only one backend can be enabled.
272 ### If more than one backend is configured then one should be enabled (enabled:true)
273 ### and the others should be disabled (enabled: false).
276 ### Ceph can be enabled only in a multi-node configuration.
277 #enabled: <true/false>
279 ### The OSD replica count.
280 ### The number of replicas for objects in the pool.
281 ### Valid value range for any production environment: 2 - 3
282 ### (for testing purposes only, in environments with very limited
283 ### storage resource, value 1 can be used as well)
284 ### Required if there are ceph nodes.
285 #osd_pool_default_size: <VALUE>
290 ### Users can define multiple network profiles depending on the hardware.
292 ### Compulsory if bonding interfaces used for infra networks.
293 ### Bonding options for linux bonding interfaces used for infra
295 ### Supported options: "mode=lacp" and "mode=active-backup"
296 ### In "mode=lacp" both NICs are active simultaneously.
297 ### In "mode=active-backup" only one slave in the bond is active and
298 ### the other slave becomes active only if the active slave fails.
299 #linux_bonding_options: <VALUE>
301 ### Optional bonding interfaces
303 ### Any number of bonding interface names.
304 ### Bonding interface name syntax must be bond[n]
305 ### where n is a number.
306 ### Numbers in bonding interface names must be
307 ### consecutive natural numbers starting from 0
308 ### (bond0, bond1, bond2, ...)
310 ### Value is a list of at least two physical interface names
311 ### (e.g. bond0: [eno3, eno4])
312 #<bonding interface name>: [<VALUE1>, <VALUE2>, ...]
314 ### Interface-subnet mapping
315 ### Any number of (name: value) pairs to map interfaces
316 ### (bonding or physical interface name) to subnets
317 ### Value is list of subnets
318 ### (e.g. bond0: [infra_internal, infra_storage_cluster] or
319 ### eno3: [infra_external])
320 ### An interface can be mapped to at most one non-vlan subnet
321 interface_net_mapping:
322 #<interface_name>: [<VALUE1>, <VALUE2>, ...]
324 ### Optional provider network interface
325 #provider_network_interfaces:
326 ### Provider network physical interface.
327 ### Either Ethernet or bonding interface.
329 ### Optional provider network type.
333 ### Containers as a Service (CaaS) provider network
335 ### CaaS bond interfaces are configured as a Linux bond interfaces.
338 ### Provider networks on this interface.
339 ### Provider networks must be defined also in the networking:
340 ### provider_networks: configuration.
341 #provider_networks: [<VALUE1>,<VALUE2>,...]
342 ### Use above structure for all the provider network interfaces
347 ### Optional SR-IOV provider networks
348 #sriov_provider_networks:
349 ### Provider network name.
350 ### Must be defined also in the
351 ### networking: provider_networks: configuration.
352 #<provider_network_name1>:
353 ### SR-IOV physical function interfaces
354 ### Multiple Ethernet interfaces can be mapped to implement one
356 ### SR-IOV interfaces can be used also for the infra networks
357 ### but only if network card type supports that
358 ### (for example Mellanox ConnectX-4 Lx
359 ### does and Intel Niantic doesn't). Another restriction is that
360 ### bond option cannot be "mode=lacp" if SR-IOV interfaces are
361 ### also bonding slave interfaces.
362 #interfaces: [<VALUE1>, <VALUE2>, ...]
364 ### Optional VF count per physical PF interface
365 ### If this parameter is not defined, default is to create
366 ### maximum supported amount of VF interfaces. In case of
367 ### Mellanox NIC (mlx5_core driver) given VF count will be
368 ### configured to the NIC HW as a maximum VF count.
371 ### Optional VF trusted mode setting
372 ### If enabled, PF can accept some privileged operations from
373 ### the VF. See the NIC manufacturer documentation for more
376 #trusted: [true|false]
378 ### Optional provider network type
379 ### - caas: configure as CaaS SR-IOV cluster network
382 ### Use above structure for all the SR-IOV provider networks in
384 #<provider_network_name2>
387 ### Performance profiles
388 performance_profiles:
390 ### The parameters specified here are affected by the type
391 ### of network profile selected for the node as follows:
392 ### The following types are supported:
393 ### SR-IOV: no mandatory parameters, but following can be used:
394 ### - default_hugepagesz
398 ### Configuration for huge page usage.
399 ### Notice: Huge page values must be in balance with RAM available
402 ### Default huge page size. Valid values are 2M and 1G.
403 #default_hugepagesz: <VALUE>
404 ### Huge page size selection parameter. Valid values are 2M and 1G.
406 ### The number of allocated persistent huge pages
409 ### Host CPU allocations.
410 ### Any host CPUs that are not allocated for some specific purpose
411 ### here will be automatically assigned by the system:
412 ### - If the node contains 'caas' in its service_profiles remaining
413 ### CPUs are allocated for CaaS CPU pools. The remaining CaaS CPUs are
414 ### allocated for default container execution.
415 ### - Any CPUs that don't fall into the above categories are allocated
416 ### for the host platform.
418 ### Optional. Allocate CPUs for the host platform.
419 ### The configured counts determine the number of full CPU cores to
420 ### allocate from each specified NUMA node. If hyperthreading is
421 ### enabled, all sibling threads are automatically grouped together
422 ### and counted as one CPU core. The actual configurable range
423 ### depends on target hardware CPU topology and desired performance
425 ### Notice: The host platform must always have at least one CPU
426 ### core from NUMA node 0.
431 ### Optional. Create CPU pools in CaaS CPU manager.
432 ### Type of this parameter is dictionary, consisting of the following attributes:
433 ### - exclusive_pool_percentage
434 ### - shared_pool_percentage
435 ### Attributes are optional, but at least one of them shall be defined
436 ### if caas_cpu_pools is defined. The sum of values can't exceed 100.
437 ### Minimum allocation is 1 CPU, which means anything greater than 0
438 ### ensures 1 CPU allocation.
440 #exclusive_pool_percentage: <VALUE>
441 #shared_pool_percentage: <VALUE>
445 ### The storage_profiles section name is part of mandatory configuration.
447 ### There must always be at least one profile defined when ceph or lvm
448 ### have been configured and enabled as the backend in the storage section.
449 ### This profile represents the enabled backend in question.
451 ### In addition the user can optionally configure storage instance profiles
455 ### Name of the storage backend. The allowed values for the backend are
461 ### Backend specific attributes - see examples of supported backend
462 ### specific attributes in the following storage profile templates.
465 #ceph_backend_profile:
467 ### A storage profile for ceph backend. This storage profile is linked
468 ### to all of the storage hosts. The ceph profile is possible only with
469 ### a multihost configuration with three (3) management hosts.
474 ### Number of devices that should be used as osd disks in one node.
475 ### This is a mandatory attribute for ceph storage hosts.
476 ### Max number of ceph osd disks is 3.
477 #nr_of_ceph_osd_disks: <VALUE>
480 ### The share ratio between the Openstack & CaaS subsystems for
481 ### the available Ceph storage. Expected to be in ratio format (A:B),
482 ### where the first number is for Openstack, the second one is for CaaS subsystem.
483 ### Always quote the value! Default value is "1:0".
484 #ceph_pg_openstack_caas_share_ratio: "<VALUE>"
488 ### A storage profile to create bare lvm volumes.
490 ### This profile can be used to create an LVM volume that will be
491 ### available under the defined directory for any further use.
493 ### This profile is mandatory for caas_worker hosts and should be
494 ### mounted to /var/lib/docker.
499 ### This parameter defines which partitions are to be used
500 ### for instance volume group.
501 #lvm_instance_storage_partitions: [<VALUE1>, <VALUE2>, ...]
504 ### This parameter defines how much space bare_lvm should take
506 ### Note that this option left for compatibility reasons, actual value
507 ### dynamically calculated.
509 #bare_lvm_storage_percentage: <VALUE>
512 ### This parameter contains the name for the created LVM volume.
516 ### The value of this parameter is used to protect the entire GRUB 2 menu structure of all the infrastructure nodes.
517 ### The configured value should be a properly salted PBKDF2 (Password-Based Key Derivation Function 2) hash.
518 ### Interactive tool "grub2-mkpasswd-pbkdf2" can be used to create the hash.
519 ### Operators will be only able to make changes in the GRUB menu, if the
520 ### hashed version of the typed-in password matches with the value of this parameter.
522 #grub2_password: "<VALUE>"
523 ### User lockout parameters are set with failed_login_attempts (default is 5)
524 ### and lockout_time (default is 300 seconds (5 minutes))
525 #failed_login_attempts: <VALUE>
526 #lockout_time: <VALUE>
531 ### The service profiles for this node. Valid values are the following:
532 ### management/base/storage/caas_master/caas_worker
533 ### Currently supported service profile combinations:
534 ### 1 Any permutations of: management/base/storage e.g: [ management, storage ]
535 ### 2 Either or both [management, caas_master] e.g.: [ management, caas_master ]
536 ### 3 caas_worker can't be combined with any other profile: e.g.: [ caas_worker ]
537 service_profiles: [<VALUE1>, <VALUE2>, ...]
539 ### The network profiles for this node, the value used in the list
540 ### should match a profile from the network_profiles section.
541 ### Only one network profile per host supported at the moment.
542 network_profiles: [profile1]
544 ### The storage profiles for this node, the value used in the list
545 ### should match a profile from the storage_profiles section.
546 #storage_profiles: [profile1]
548 ### The performance profiles for this node, the value used in the list
549 ### should match a profile from the performance_profiles section.
550 ### Only one performance profile per host supported at the moment.
551 #performance_profiles: [profile1]
553 ### The kubernetes label set of the node, you can define an arbitrary set of key-value pairs.
554 ### These key-value pairs will be provisioned to the corresponding
555 ### Kubernetes node object as kubernetes labels.
556 ### Optional parameter, only interpreted when the node has a CaaS subsystem related service profile.
557 ### For any other node this attribute will be silently ignored.
558 ### The keys under "labels" can be anything, except: 'name', 'nodetype', 'nodeindex', 'nodename'
559 ### These labels are reserved for infrastructure usage
561 # type: "performance"
563 # hyperthreading: "off"
566 ### Network domain for this node
567 ### Value should match some network domain in networking section.
568 network_domain: rack-1
570 ### HW management (e.g. IPMI or iLO) address and credentials
575 # Optional: the IPMI privilege level to request.
576 # Typical values include 'USER', 'OPERATOR', 'ADMINISTRATOR'
577 # default is 'ADMINISTRATOR' if unspecified.
578 # priv_level: <VALUE>
579 ### Optional parameter needed for virtual deployment to identify the
580 ### nodes the mac address for the provisioning interface
581 #mgmt_mac: [<VALUE1>, <VALUE2>, ...]