#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.
#################################################################################################################
apiVersion: ceph.rook.io/v1
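# The kind/metadata/spec skeleton below follows the standard Rook example; adjust the name and
# namespace for your own deployment.
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion: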
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v12 is luminous, v13 is mimic, and v14 is nautilus.
    # RECOMMENDATION: In production, use a specific version tag (as below) instead of a general release tag
    # such as v13 or v14, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    image: ceph/ceph:v13.2.2-20190410
    # Whether to allow unsupported versions of Ceph. Currently luminous, mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in a Minikube environment.
  dataDirHostPath: /var/lib/rook
  # set the number of mons to be started
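  mon:
    # three mons are typical for production and match the three-node minimum noted in the header above
    count: 3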
    allowMultiplePerNode: true
  # enable the ceph dashboard for viewing cluster status
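  dashboard:
    # turned on in this example, as the comment above describes; set to false to disable the dashboard
    enabled: true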
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    # serve the dashboard using SSL
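    # for example (illustrative values; 8443 is the usual dashboard port):
    # port: 8443
    # ssl: true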
  network:
    # toggle to use hostNetwork
    hostNetwork: false
  rbdMirroring:
    # The number of daemons that will perform the rbd mirroring.
    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
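    # no rbd-mirror daemons are started by default; raise this value to enable mirroring
    workers: 0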
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'; the commented-out sketch below fills in that example.
  placement:
#    all:
#      nodeAffinity:
#        requiredDuringSchedulingIgnoredDuringExecution:
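#          nodeSelectorTerms:
#          - matchExpressions:
#            - key: role
#              operator: In
#              values:
#              - storage-node
#      tolerations:
#      - key: storage-node
#        operator: Exists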
# The above placement information can also be specified for mon, osd, and mgr components
  annotations:
# If no mgr annotations are set, prometheus scrape annotations will be set by default.
#    mgr:
  resources:
# The requests and limits set here allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
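# A commented-out sketch matching that description (500m = half a core, 1024Mi = 1 gigabyte):
#    mgr:
#      limits:
#        cpu: "500m"
#        memory: "1024Mi"
#      requests:
#        cpu: "500m"
#        memory: "1024Mi"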
# The above example requests/limits can also be added to the mon and osd components
  storage: # cluster level storage configuration and selection
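    # use every node and every available raw device, as described in the header comment above
    useAllNodes: true
    useAllDevices: true
    config: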
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      # storeType: bluestore
      metadataDevice: # e.g. "md0"; specify a non-rotational device so ceph-volume will use it as the block.db device for bluestore.
      # databaseSizeMB: "10240" # uncomment if the disks are smaller than 100 GB
      # journalSizeMB: "10240" # uncomment if the disks are 20 GB or smaller
      # osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
# Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
#    directories:
#    - path: "/var/lib/rook/storage-dir"
# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
# nodes below will be used as storage resources. Each node's 'name' field should match its 'kubernetes.io/hostname' label.
#    nodes:
#    - name: "172.17.4.101"
#      directories: # specific directories to use for storage can be specified for each node
#      - path: "/rook/storage-dir"
#    - name: "172.17.4.201"
#      devices: # specific devices to use for storage can be specified for each node
#      - name: "nvme01" # multiple osds can be created on high performance devices
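#        config: # for example (illustrative value), create several OSDs on this high-performance device
#          osdsPerDevice: "5"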
#      config: # configuration can be specified at the node level which overrides the cluster level config
#        storeType: filestore
#    - name: "172.17.4.301"
#      deviceFilter: "^sd."