Add Rook operator and Ceph cluster bring-up script 82/1282/8
authorChen, Tingjie <tingjie.chen@intel.com>
Tue, 30 Jul 2019 08:43:08 +0000 (08:43 +0000)
committerChen, Tingjie <tingjie.chen@intel.com>
Fri, 16 Aug 2019 09:59:22 +0000 (09:59 +0000)
Change-Id: I7f296701ac1126852fee6b0c57b79c2e53455da2
Signed-off-by: Chen, Tingjie <tingjie.chen@intel.com>
18 files changed:
deploy/kud-plugin-addons/rook/README.md
deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/install.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-common.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml [new file with mode: 0644]

index e69de29..1ce151b 100644 (file)
@@ -0,0 +1,147 @@
+# Intel Rook infrastructure for Ceph cluster deployment
+
+By default, OSDs are created on the directory /var/lib/rook/storage-dir, and
+Ceph cluster information is stored under /var/lib/rook.
+
+## Preconditions
+
+1. Compute node disk space: at least 20 GB free.
+
+2. Kubernetes version: Ceph CSI v1.0 requires Kubernetes >= 1.13.
+The following patch upgrades KUD (https://github.com/onap/multicloud-k8s):
+
+```
+$ git diff
+diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
+index 9b36547..5c29fa4 100644
+--- a/kud/deployment_infra/playbooks/kud-vars.yml
++++ b/kud/deployment_infra/playbooks/kud-vars.yml
+@@ -58,7 +58,7 @@ ovn4nfv_version: adc7b2d430c44aa4137ac7f9420e14cfce3fa354
+ ovn4nfv_url: "https://git.opnfv.org/ovn4nfv-k8s-plugin/"
+
+ go_version: '1.12.5'
+-kubespray_version: 2.8.2
+-helm_client_version: 2.9.1
++kubespray_version: 2.9.0
++helm_client_version: 2.13.1
+ # kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
+ ansible_version: 2.7.10
+diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+index 9966ba8..cacb4b3 100644
+--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
++++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+@@ -48,7 +48,7 @@ local_volumes_enabled: true
+ local_volume_provisioner_enabled: true
+
+ ## Change this to use another Kubernetes version, e.g. a current beta release
+-kube_version: v1.12.3
++kube_version: v1.13.5
+
+ # Helm deployment
+ helm_enabled: true
+```
+
+After the upgrade, verify the Kubernetes version:
+```
+$ kubectl version
+Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+```
+
+If the kubectl server version is still wrong, you can upgrade it manually:
+```console
+$ kubeadm upgrade apply v1.13.5
+```
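+
+Note that kubeadm itself must already be at the target version; check it with:
+
+```console
+$ kubeadm version
+```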
+
+## Deployment
+
+To bring up the Rook operator (v1.0) and the Ceph cluster (Mimic 13.2.2), run:
+
+```console
+cd yaml
+./install.sh
+```
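+
+To verify the bring-up, watch the rook-ceph pods until the operator, mon, OSD
+and toolbox pods are Running, then query Ceph health through the toolbox (pod
+names vary per deployment):
+
+```console
+kubectl -n rook-ceph get pods -w
+kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -- ceph status
+```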
+
+## Test
+
+To test a sample workload on the Ceph cluster, follow these steps:
+
+1. Bring up the Rook operator and Ceph cluster as described above.
+2. Create the storage class:
+
+```console
+kubectl create -f ./test/rbd/storageclass.yaml
+```
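+   This creates both the rbd CephBlockPool and the csi-rbd StorageClass; you
+   can confirm with:
+```console
+kubectl get storageclass csi-rbd
+kubectl -n rook-ceph get cephblockpool rbd
+```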
+
+3. Create the RBD secret. First create the client.kube user and get its key:
+```console
+kubectl exec -ti -n rook-ceph rook-ceph-operator-948f8f84c-749zb -- bash -c \
+"ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-or-create-key client.kube mon \"allow profile rbd\" osd \"profile rbd pool=rbd\""
+```
+   Replace the pod name with your own rook-ceph-operator pod (see: kubectl get pod -n rook-ceph).
+   Then get the base64-encoded keys of the admin and kube users by executing in the operator pod:
+```console
+ceph auth get-key client.admin|base64
+ceph auth get-key client.kube|base64
+```
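+   Alternatively, a one-shot sketch run from outside the pod (assuming the
+   operator pod carries the label app=rook-ceph-operator):
+```console
+POD=$(kubectl -n rook-ceph get pod -l app=rook-ceph-operator -o jsonpath='{.items[0].metadata.name}')
+ADMIN_KEY=$(kubectl -n rook-ceph exec $POD -- ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-key client.admin | base64)
+KUBE_KEY=$(kubectl -n rook-ceph exec $POD -- ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-key client.kube | base64)
+```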
+  Then fill the keys into secret.yaml.
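+   A non-interactive sketch for patching them in (assuming ADMIN_KEY and
+   KUBE_KEY hold the base64-encoded keys from the previous step):
+```console
+sed -i "s|admin: .*|admin: \"$ADMIN_KEY\"|" ./test/rbd/secret.yaml
+sed -i "s|kube: .*|kube: \"$KUBE_KEY\"|" ./test/rbd/secret.yaml
+```
+   Finally, create the secret: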
+```console
+kubectl create -f ./test/rbd/secret.yaml
+```
+4. Create the RBD Persistent Volume Claim:
+```console
+kubectl create -f ./test/rbd/pvc.yaml
+```
+5. Create the RBD demo pod:
+```console
+kubectl create -f ./test/rbd/pod.yaml
+```
+6. Check the created volumes and the application mount status:
+```console
+$ kubectl get pvc
+NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc   Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        84m
+$ kubectl get pod
+NAME              READY   STATUS    RESTARTS   AGE
+csirbd-demo-pod   1/1     Running   0          84m
+$ kubectl exec -ti csirbd-demo-pod -- bash
+root@csirbd-demo-pod:/# df -h
+Filesystem      Size  Used Avail Use% Mounted on
+overlay         733G   35G  662G   5% /
+tmpfs            64M     0   64M   0% /dev
+tmpfs            32G     0   32G   0% /sys/fs/cgroup
+/dev/sda2       733G   35G  662G   5% /etc/hosts
+shm              64M     0   64M   0% /dev/shm
+/dev/rbd0       976M  2.6M  958M   1% /var/lib/www/html
+tmpfs            32G   12K   32G   1% /run/secrets/kubernetes.io/serviceaccount
+tmpfs            32G     0   32G   0% /proc/acpi
+tmpfs            32G     0   32G   0% /proc/scsi
+tmpfs            32G     0   32G   0% /sys/firmware
+```
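+   To confirm the RBD-backed mount is writable, e.g.:
+```console
+kubectl exec csirbd-demo-pod -- sh -c 'echo hello > /var/lib/www/html/test.txt && cat /var/lib/www/html/test.txt'
+```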
+7. Create the RBD snapshot class:
+```console
+kubectl create -f ./test/rbd/snapshotclass.yaml
+```
+8. Create a volume snapshot and verify:
+```console
+$ kubectl create -f ./test/rbd/snapshot.yaml
+
+$ kubectl get volumesnapshotclass
+NAME                      AGE
+csi-rbdplugin-snapclass   51s
+$ kubectl get volumesnapshot
+NAME               AGE
+rbd-pvc-snapshot   33s
+```
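+   For details on the snapshot state (creation time, readiness, errors):
+```console
+kubectl describe volumesnapshot rbd-pvc-snapshot
+```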
+9. Restore the snapshot to a new PVC and verify:
+```console
+$ kubectl create -f ./test/rbd/pvc-restore.yaml
+
+$ kubectl get pvc
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc           Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        42h
+rbd-pvc-restore   Bound    pvc-530a4939-e4c0-428d-a072-c9c39d110d7a   1Gi        RWO            csi-rbd        5s
+```
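+
+To check the restored content, you can attach rbd-pvc-restore to a throwaway
+pod; a sketch modeled on test/rbd/pod.yaml (the busybox pod here is
+hypothetical):
+
+```console
+cat <<EOF | kubectl create -f -
+apiVersion: v1
+kind: Pod
+metadata:
+  name: restore-check
+spec:
+  containers:
+  - name: restore-check
+    image: busybox
+    command: ["sleep", "3600"]
+    volumeMounts:
+    - name: vol
+      mountPath: /data
+  volumes:
+  - name: vol
+    persistentVolumeClaim:
+      claimName: rbd-pvc-restore
+EOF
+kubectl exec restore-check -- ls /data
+```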
+
diff --git a/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh b/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh
new file mode 100755 (executable)
index 0000000..1499c3f
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# usage: collect_rook_yaml.sh [target]
+
+set -ex
+
+if [ $# -ne 1 ] ; then
+    echo "Please input the target folder!"
+    exit 1
+fi
+
+VER="0.1"
+MKDIR_P="mkdir -p"
+target=$1
+temp=rook_yaml
+
+# copy to target
+$MKDIR_P $temp
+cp rook-common.yaml $temp/
+cp rook-operator-with-csi.yaml $temp/
+cp rook-ceph-cluster.yaml $temp/
+cp rook-toolbox.yaml $temp/
+cp -rf ./csi/ $temp/
+cp -rf ./test/ $temp/
+cp install.sh $temp/
+
+if [ ! -d $target/yaml ]; then
+    $MKDIR_P $target/yaml;
+fi;
+
+tar czvf $target/yaml/rook_yaml-$VER.tar.gz $temp/
+
+# clear
+rm -rf $temp
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..5fb0bb1
--- /dev/null
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..fdcc18b
--- /dev/null
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..d37d0cc
--- /dev/null
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..028d7bd
--- /dev/null
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/install.sh b/deploy/kud-plugin-addons/rook/yaml/install.sh
new file mode 100755 (executable)
index 0000000..00b9e04
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Make sure kubernetes server is up with network dns
+# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
+
+# Remove taint if have
+# kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-
+
+# Remove remaining config files of the last deployment
+echo ""|sudo -S rm -rf /var/lib/rook/*
+
+# Create common CRD objects
+kubectl create -f rook-common.yaml
+
+# Create rbac, since rook operator is not permitted to create rbac rules, these
+# rules have to be created outside of operator
+kubectl apply -f ./csi/rbac/rbd/
+kubectl apply -f ./csi/rbac/cephfs/
+
+# Start rook ceph operator with csi support
+kubectl create -f rook-operator-with-csi.yaml
+
+# Bring up cluster with default configuration; the Ceph image is
+# ceph/ceph:v13.2.2-20190410 (see rook-ceph-cluster.yaml), with OSDs created
+# on the default directory /var/lib/rook/storage-dir on each node
+kubectl create -f rook-ceph-cluster.yaml
+
+# Start the toolbox container with Ceph CLI support; to enter its bash env, run:
+# kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -- bash
+kubectl create -f rook-toolbox.yaml
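+
+# Optional sanity check (a sketch): block until the CephCluster reports Created
+# until kubectl -n rook-ceph get cephcluster rook-ceph -o jsonpath='{.status.state}' | grep -q Created; do sleep 10; done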
+
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml
new file mode 100644 (file)
index 0000000..0e1ffba
--- /dev/null
@@ -0,0 +1,125 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster.
+# In this configuration every node contributes a directory-based OSD (/var/lib/rook/storage-dir),
+# and multiple mons per node are allowed. See the documentation for more details on storage settings available.
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: ceph/ceph:v13.2.2-20190410
+    # Whether to allow unsupported versions of Ceph. Currently luminous, mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: /var/lib/rook
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    # ssl: true
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: false
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    deviceFilter:
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      metadataDevice: # e.g. "md0"; specify a non-rotational device so ceph-volume will use it as the bluestore block db device.
+      databaseSizeMB: "10240" # uncomment if the disks are smaller than 100 GB
+      journalSizeMB: "10240"  # uncomment if the disks are 20 GB or smaller
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # encryptedDevice: "true" # the default value for this option is "false"
+# Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
+    directories:
+    - path: "/var/lib/rook/storage-dir"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: "^sd."
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml
new file mode 100644 (file)
index 0000000..e6366a0
--- /dev/null
@@ -0,0 +1,618 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+---
+# The CRD declarations
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+                name:
+                  pattern: ^(luminous|mimic|nautilus)$
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 1
+                  type: integer
+                preferredCount:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+              required:
+              - count
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                nodes:
+                  items: {}
+                  type: array
+                useAllDevices: {}
+                useAllNodes:
+                  type: boolean
+          required:
+          - mon
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  additionalPrinterColumns:
+    - name: MdsCount
+      type: string
+      description: Number of MDSs
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: rook-ceph
+  labels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml
new file mode 100644 (file)
index 0000000..c34b879
--- /dev/null
@@ -0,0 +1,73 @@
+#################################################################################################################
+# The deployment for the rook operator that enables the ceph-csi driver for beta testing.
+#################################################################################################################
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: rook/ceph:v1.0.4
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "true"
+        # CSI enablement
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+        - name: ROOK_CSI_CEPHFS_IMAGE
+          value: "quay.io/cephcsi/cephfsplugin:v1.0.0"
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_RBD_IMAGE
+          value: "quay.io/cephcsi/rbdplugin:v1.0.0"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "quay.io/k8scsi/csi-node-driver-registrar:v1.0.2"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "quay.io/k8scsi/csi-provisioner:v1.0.1"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "quay.io/k8scsi/csi-snapshotter:v1.0.1"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "quay.io/k8scsi/csi-attacher:v1.0.1"
+        # The name of the node to pass with the downward API
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml
new file mode 100644 (file)
index 0000000..de442f0
--- /dev/null
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: rook-ceph
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: rook/ceph:v1.0.4
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml
new file mode 100644 (file)
index 0000000..3a75fb9
--- /dev/null
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: csirbd-demo-pod
+spec:
+  containers:
+   - name: web-server
+     image: nginx
+     volumeMounts:
+       - name: mypvc
+         mountPath: /var/lib/www/html
+  volumes:
+   - name: mypvc
+     persistentVolumeClaim:
+       claimName: rbd-pvc
+       readOnly: false
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml
new file mode 100644 (file)
index 0000000..1fc02d5
--- /dev/null
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc-restore
+spec:
+  storageClassName: csi-rbd
+  dataSource:
+    name: rbd-pvc-snapshot
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml
new file mode 100644 (file)
index 0000000..3115642
--- /dev/null
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml
new file mode 100644 (file)
index 0000000..89c8fe5
--- /dev/null
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi-rbd-secret
+  namespace: default
+data:
+  # Base64-encoded key of the client.admin user defined in the Ceph cluster
+  admin: "QVFCQzExQmRuTVp0RVJBQW9FWDJmQ1RkTFQ1QWZ4SlU0OHFLc3c9PQ=="
+  # Base64-encoded key of the client.kube user defined in the Ceph cluster
+  kube: "QVFBOHJGTmRzeDluQ3hBQW1zRXJkT3gybWYyTTQxTzVidG9ONlE9PQ=="
+  # if monValueFromSecret is set to "monitors", uncomment the
+  # following and set the mon there
+  #monitors: BASE64-ENCODED-Comma-Delimited-Mons
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml
new file mode 100644 (file)
index 0000000..f8ba153
--- /dev/null
@@ -0,0 +1,10 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshot
+metadata:
+  name: rbd-pvc-snapshot
+spec:
+  snapshotClassName: csi-rbdplugin-snapclass
+  source:
+    name: rbd-pvc
+    kind: PersistentVolumeClaim
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml
new file mode 100644 (file)
index 0000000..03e52da
--- /dev/null
@@ -0,0 +1,11 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-rbdplugin-snapclass
+snapshotter: rbd.csi.ceph.com
+parameters:
+  pool: rbd
+  monitors: 10.111.122.22:6789/0,10.104.227.175:6789/0,10.98.129.229:6789/0
+  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+  csi.storage.k8s.io/snapshotter-secret-namespace: default
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml
new file mode 100644 (file)
index 0000000..ae8c30d
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: rbd
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 3
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+   name: csi-rbd
+provisioner: rbd.csi.ceph.com
+parameters:
+    # Comma separated list of Ceph monitors
+    # if using FQDN, make sure csi plugin's dns policy is appropriate.
+    monitors: 10.233.47.29:6789/0,10.233.23.25:6789/0,10.233.48.241:6789/0
+
+    # if "monitors" parameter is not set, driver to get monitors from same
+    # secret as admin/user credentials. "monValueFromSecret" provides the
+    # key in the secret whose value is the mons
+    #monValueFromSecret: "monitors"
+    
+    # Ceph pool into which the RBD image shall be created
+    pool: rbd
+
+    # RBD image format. Defaults to "2".
+    imageFormat: "2"
+
+    # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only the `layering` feature.
+    imageFeatures: layering
+    
+    # The secrets have to contain Ceph admin credentials.
+    csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/provisioner-secret-namespace: default
+    csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/node-publish-secret-namespace: default
+
+    # Ceph users for operating RBD
+    adminid: admin
+    userid: kube
+    # uncomment the following to use rbd-nbd as mounter on supported nodes
+    #mounter: rbd-nbd
+reclaimPolicy: Delete