Reliable volume for minio server and e2e support 84/1884/7
author Chen, Tingjie <tingjie.chen@intel.com>
Tue, 22 Oct 2019 16:28:40 +0000 (16:28 +0000)
committer Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
Fri, 1 Nov 2019 03:24:28 +0000 (03:24 +0000)
1. Add an E2E test for the MinIO Server side. Since the image is
uploaded to the MinIO Server, we need to verify the actual size of
the stored object: after the PATCH operations, compare the object
size reported by MinIO with the image size sent via the REST API
(sketched below).
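
In outline, the check added to e2e_test.sh works as follows;
setup_mc and get_object_size are the helpers introduced in
lib/minio.sh:

  # Resolve the MinIO service ClusterIP and point the mc client at it.
  MINIO_IP=$(kubectl get services | grep minio-service | awk '{print $3}')
  setup_mc $MINIO_IP

  # Compare the stored object size against the uploaded image size.
  obj_size=$(get_object_size container qwerty123)
  if [[ $obj_size != $IMAGE_SIZE ]]; then
      exit 1
  fi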

2. Fix an issue with the MinIO Service: in the MinIO Server
install.sh, minio-service.yaml needs to be created so that the
Endpoint IP can be obtained for MinIO client operations.
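
With the service created by install.sh, the endpoint used by the
MinIO client can be read back roughly like this (9000 is the MinIO
port configured in lib/minio.sh):

  # install.sh now creates the service before any client operation.
  kubectl apply -f minio-service.yaml

  # Read the ClusterIP back for mc / MinIO client operations.
  MINIO_IP=$(kubectl get services | grep minio-service | awk '{print $3}')
  echo "MinIO endpoint: http://$MINIO_IP:9000"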

3. Reliable Volume implementation, which uses Ceph CSI to provide a
Ceph RBD volume for MinIO to export. This feature is NOT enabled yet
and has NO impact on the current local volume, because further
changes are still needed (the REST API Agent service also uses the
volume together with MinIO); it can be enabled in the next release.
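
When enabled, the flow is driven by reliable_volume_install.sh
instead of the local-volume install.sh. A condensed sketch of the
ordering (the full script also patches the Ceph monitor list and
secrets into storageclass.yaml and secret.yaml):

  # Rook common resources, CSI RBAC, operator and Ceph cluster.
  kubectl create -f ./ceph-volume/rook-common.yaml
  kubectl apply -f ./ceph-volume/csi/rbac/rbd/
  kubectl create -f ./ceph-volume/rook-operator-with-csi.yaml
  kubectl create -f ./ceph-volume/rook-ceph-cluster.yaml

  # RBD StorageClass, secret and PVC consumed by the MinIO deployment.
  kubectl create -f ./ceph-volume/storageclass.yaml
  kubectl create -f ./ceph-volume/secret.yaml
  kubectl create -f ./ceph-volume/pvc.yaml
  kubectl create -f minio-deployment.yaml
  kubectl create -f minio-service.yaml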

4. Add an uninstall script to clean up the StorageClass/PV/PVC and
other resources of the MinIO Server deployment.
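
uninstall.sh removes the local-volume resources in reverse order of
creation, along these lines (paths shortened here; the script uses
absolute $ICN_DIR paths):

  kubectl delete -f minio-service.yaml
  kubectl delete -f minio-deployment.yaml
  kubectl delete -f local/local-pvc.yaml
  kubectl delete -f local/local-sc.yaml
  kubectl delete -f local/local-pv.yaml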

Change-Id: I17250be6ee1d64b98ad789673621f2f2e0009c3e
Signed-off-by: Chen, Tingjie <tingjie.chen@intel.com>
24 files changed:
cmd/bpa-restapi-agent/bpa_api_install.sh
cmd/bpa-restapi-agent/e2e_test.sh
cmd/bpa-restapi-agent/service.yml
deploy/kud-plugin-addons/minio/install.sh [deleted file]
deploy/kud-plugin-addons/minio/lib/minio.sh [new file with mode: 0755]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/pvc.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-ceph-cluster.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-common.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-operator-with-csi.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-toolbox.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/secret.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/ceph-volume/storageclass.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/minio/yaml/install.sh [new file with mode: 0755]
deploy/kud-plugin-addons/minio/yaml/local/local-pv.yaml [moved from deploy/kud-plugin-addons/minio/local-pv.yaml with 94% similarity]
deploy/kud-plugin-addons/minio/yaml/local/local-pvc.yaml [moved from deploy/kud-plugin-addons/minio/local-pvc.yaml with 86% similarity]
deploy/kud-plugin-addons/minio/yaml/local/local-sc.yaml [moved from deploy/kud-plugin-addons/minio/local-sc.yaml with 100% similarity]
deploy/kud-plugin-addons/minio/yaml/minio-deployment.yaml [moved from deploy/kud-plugin-addons/minio/minio-deployment.yaml with 96% similarity]
deploy/kud-plugin-addons/minio/yaml/minio-service.yaml [moved from deploy/kud-plugin-addons/minio/minio-service.yaml with 100% similarity]
deploy/kud-plugin-addons/minio/yaml/reliable_volume_install.sh [new file with mode: 0755]
deploy/kud-plugin-addons/minio/yaml/uninstall.sh [new file with mode: 0755]

index 843727a..3b8428e 100755 (executable)
@@ -8,7 +8,7 @@ kubectl apply -f bpa_api_cluster_role.yml
 
 kubectl apply -f bpa_api_cluster_role_binding.yml
 
-pushd $ICN_DIR/deploy/kud-plugin-addons/minio
+pushd $ICN_DIR/deploy/kud-plugin-addons/minio/yaml
 
 ./install.sh
 
index cc41f21..bfc9478 100755 (executable)
@@ -3,6 +3,7 @@
 ICN_DIR=$(dirname "$(dirname "$PWD")")
 
 source "$ICN_DIR/env/lib/common.sh"
+source "$ICN_DIR/deploy/kud-plugin-addons/minio/lib/minio.sh"
 
 #create sample image
 if true ; then
@@ -68,5 +69,13 @@ call_api --request PATCH --data-binary "@/tmp/sample_image" \
 http://$IP:9015/v1/baremetalcluster/alpha/beta/container_images/qwerty123 \
 --header "Upload-Offset: 0" --header "Expect:" -i
 
+MINIO_IP=$(kubectl get services | grep minio-service | awk '{print $3}')
+setup_mc $MINIO_IP
+obj_size=$(get_object_size container qwerty123)
+echo "Got obj size: $obj_size"
+if [[ $obj_size != $IMAGE_SIZE ]]; then
+    exit 1
+fi
+
 call_api -i -X DELETE \
 http://$IP:9015/v1/baremetalcluster/alpha/beta/container_images/qwerty123
index db3163b..b121c73 100644 (file)
@@ -33,7 +33,7 @@ spec:
       - name: storage
         persistentVolumeClaim:
           # Name of the PVC created earlier
-          claimName: minio-local-claim
+          claimName: minio-local-pvc
       containers:
       - name: bpa-api1
         image: akraino.org/icn/bpa-restapi-agent:latest
diff --git a/deploy/kud-plugin-addons/minio/install.sh b/deploy/kud-plugin-addons/minio/install.sh
deleted file mode 100755 (executable)
index b81d8b1..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-ICN_DIR=$(dirname "$(dirname "$(dirname "$PWD")")")
-
-# Make sure 64GB+ free space.
-
-echo "s"|sudo -S mkdir /mnt/minio
-
-# Create local-sc persistent volume first since not support dynamic provisioning.
-kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/local-pv.yaml
-
-# Create storage class for local-sc
-kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/local-sc.yaml
-
-# Create persistent volume claim for minio server
-kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/local-pvc.yaml
-
-# Create deployment of MinIO server
-kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/minio-deployment.yaml
-
-# Create service for MinIO
-# kubectl create -f minio-service.yaml
-
diff --git a/deploy/kud-plugin-addons/minio/lib/minio.sh b/deploy/kud-plugin-addons/minio/lib/minio.sh
new file mode 100755 (executable)
index 0000000..cae9a3e
--- /dev/null
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+IP=$(kubectl get services | grep minio-service | awk '{print $3}')
+
+function setup_mc {
+    wget_mc=`wget https://dl.min.io/client/mc/release/linux-amd64/mc -O ./mc`
+    result=$?
+    if [ "$result" != 0 ]; then
+        return 1
+    fi
+    chmod +x ./mc
+    mc_add=`./mc config host add mhost http://$1:9000 ICN-ACCESSKEYID ICN-SECRETACCESSKEY`
+    result=$(echo $mc_add | grep successfully)
+    if [ "$result" != "" ]; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+function get_object_size {
+    #echo "Check the object size of bucket: $1, object: $2.\n"
+
+    mc_ls=`./mc ls --json mhost/$1/$2`
+    size=$(echo $mc_ls | grep size | sed 's/.*"size":\([0-9]*\).*/\1/g')
+
+    if [ "$size" != "" ]; then
+        echo $((10#${size}))
+        return 0
+    else
+        echo 0
+        return 1
+    fi
+}
+
+#setup_mc
+#echo "setup mhost result: $?"
+
+# example test for bucket: binary, object: mc
+#mc mb mhost/binary
+#mc cp ./mc mhost/binary
+# echo '$? = '"$?"
+#size=$(get_object_size container qwerty123)
+#echo "size = $size"
+
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..5fb0bb1
--- /dev/null
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/cephfs/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..fdcc18b
--- /dev/null
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..d37d0cc
--- /dev/null
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/csi/rbac/rbd/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..028d7bd
--- /dev/null
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/pvc.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/pvc.yaml
new file mode 100644 (file)
index 0000000..b9db71c
--- /dev/null
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 40Gi
+  storageClassName: csi-rbd
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-ceph-cluster.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-ceph-cluster.yaml
new file mode 100644 (file)
index 0000000..0e1ffba
--- /dev/null
@@ -0,0 +1,125 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a production cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
+# in this example. See the documentation for more details on storage settings available.
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: ceph/ceph:v13.2.2-20190410
+    # Whether to allow unsupported versions of Ceph. Currently luminous, mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: /var/lib/rook
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    # ssl: true
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: false
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    deviceFilter:
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      metadataDevice: # "md0" specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+      databaseSizeMB: "10240" # uncomment if the disks are smaller than 100 GB
+      journalSizeMB: "10240"  # uncomment if the disks are 20 GB or smaller
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # encryptedDevice: "true" # the default value for this option is "false"
+# Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
+    directories:
+    - path: "/var/lib/rook/storage-dir"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: "^sd."
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-common.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-common.yaml
new file mode 100644 (file)
index 0000000..e6366a0
--- /dev/null
@@ -0,0 +1,618 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+---
+# The CRD declarations
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+                name:
+                  pattern: ^(luminous|mimic|nautilus)$
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 1
+                  type: integer
+                preferredCount:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+              required:
+              - count
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                nodes:
+                  items: {}
+                  type: array
+                useAllDevices: {}
+                useAllNodes:
+                  type: boolean
+          required:
+          - mon
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  additionalPrinterColumns:
+    - name: MdsCount
+      type: string
+      description: Number of MDSs
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: rook-ceph
+  labels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+  # Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-operator-with-csi.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-operator-with-csi.yaml
new file mode 100644 (file)
index 0000000..c34b879
--- /dev/null
@@ -0,0 +1,73 @@
+#################################################################################################################
+# The deployment for the rook operator that enables the ceph-csi driver for beta testing.
+# For example, to create the rook-ceph cluster:
+#################################################################################################################
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: rook/ceph:v1.0.4
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "true"
+        # CSI enablement
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+        - name: ROOK_CSI_CEPHFS_IMAGE
+          value: "quay.io/cephcsi/cephfsplugin:v1.0.0"
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_RBD_IMAGE
+          value: "quay.io/cephcsi/rbdplugin:v1.0.0"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "quay.io/k8scsi/csi-node-driver-registrar:v1.0.2"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "quay.io/k8scsi/csi-provisioner:v1.0.1"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "quay.io/k8scsi/csi-snapshotter:v1.0.1"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "quay.io/k8scsi/csi-attacher:v1.0.1"
+        # The name of the node to pass with the downward API
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-toolbox.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/rook-toolbox.yaml
new file mode 100644 (file)
index 0000000..de442f0
--- /dev/null
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: rook-ceph
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: rook/ceph:v1.0.4
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/secret.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/secret.yaml
new file mode 100644 (file)
index 0000000..ce8749e
--- /dev/null
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi-rbd-secret
+  namespace: default
+data:
+  # Key value corresponds to a user name defined in Ceph cluster
+  admin: "QVFDYzBxOWR1MVFkTmhBQStiQWtyK0pMaW8vQkMxNDVBbnMrVHc9PQ=="
+  # Key value corresponds to a user name defined in Ceph cluster
+  kube: "QVFEajNhOWRaNS80TWhBQTZCZjRCbDZidVBRUVg5WjlHVktxM2c9PQ=="
+  # if monValueFromSecret is set to "monitors", uncomment the
+  # following and set the mon there
+  #monitors: BASE64-ENCODED-Comma-Delimited-Mons
diff --git a/deploy/kud-plugin-addons/minio/yaml/ceph-volume/storageclass.yaml b/deploy/kud-plugin-addons/minio/yaml/ceph-volume/storageclass.yaml
new file mode 100644 (file)
index 0000000..dd624be
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: rbd
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 3
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+   name: csi-rbd
+provisioner: rbd.csi.ceph.com
+parameters:
+    # Comma separated list of Ceph monitors
+    # if using FQDN, make sure csi plugin's dns policy is appropriate.
+    monitors: 10.106.26.186:6789,10.108.31.63:6789,10.111.49.99:6789
+
+    # if "monitors" parameter is not set, driver to get monitors from same
+    # secret as admin/user credentials. "monValueFromSecret" provides the
+    # key in the secret whose value is the mons
+    #monValueFromSecret: "monitors"
+    
+    # Ceph pool into which the RBD image shall be created
+    pool: rbd
+
+    # RBD image format. Defaults to "2".
+    imageFormat: "2"
+
+    # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+    imageFeatures: layering
+    
+    # The secrets have to contain Ceph admin credentials.
+    csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/provisioner-secret-namespace: default
+    csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/node-publish-secret-namespace: default
+
+    # Ceph users for operating RBD
+    adminid: admin
+    userid: kube
+    # uncomment the following to use rbd-nbd as mounter on supported nodes
+    #mounter: rbd-nbd
+reclaimPolicy: Delete
diff --git a/deploy/kud-plugin-addons/minio/yaml/install.sh b/deploy/kud-plugin-addons/minio/yaml/install.sh
new file mode 100755 (executable)
index 0000000..912c911
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+ICN_DIR=$(dirname "$(dirname "$(dirname "$(dirname "$PWD")")")")
+
+# Make sure 64GB+ free space.
+
+echo "s"|sudo -S mkdir /mnt/minio
+
+echo "ICN_DIR: $ICN_DIR"
+# Create the local-sc persistent volume first, since local-sc does not support dynamic provisioning.
+kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-pv.yaml
+
+# Create storage class for local-sc
+kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-sc.yaml
+
+# Create persistent volume claim for minio server
+kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-pvc.yaml
+
+# Create deployment of MinIO server
+kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/minio-deployment.yaml
+
+# Create service for MinIO
+kubectl apply -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/minio-service.yaml
+
@@ -1,7 +1,7 @@
 apiVersion: v1
 kind: PersistentVolume
 metadata:
-  name: minio-pv
+  name: minio-local-pv
 spec:
   capacity:
     storage: 64Gi
@@ -1,7 +1,7 @@
 kind: PersistentVolumeClaim
 apiVersion: v1
 metadata:
-  name: minio-local-claim
+  name: minio-local-pvc
 spec:
   accessModes:
   - ReadWriteOnce
@@ -20,7 +20,7 @@ spec:
       - name: storage
         persistentVolumeClaim:
           # Name of the PVC created earlier
-          claimName: minio-local-claim
+          claimName: minio-local-pvc
       containers:
       - name: minio
         # Pulls the default Minio image from Docker Hub
diff --git a/deploy/kud-plugin-addons/minio/yaml/reliable_volume_install.sh b/deploy/kud-plugin-addons/minio/yaml/reliable_volume_install.sh
new file mode 100755 (executable)
index 0000000..e3d1c5b
--- /dev/null
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+# Make sure 64GB+ free space.
+
+#echo "...... Deploy Rook Ceph cluster ......"
+
+kubectl create -f ./ceph-volume/rook-common.yaml
+kubectl apply -f ./ceph-volume/csi/rbac/rbd/
+kubectl apply -f ./ceph-volume/csi/rbac/cephfs/
+kubectl create -f ./ceph-volume/rook-operator-with-csi.yaml
+
+# Bring up the cluster with the default configuration; the Ceph image used is
+# ceph/ceph:v13.2.2-20190410, and OSDs are created on a directory under dataDirHostPath on each node
+kubectl create -f ./ceph-volume/rook-ceph-cluster.yaml
+kubectl create -f ./ceph-volume/rook-toolbox.yaml
+
+echo "...... Deploy MinIO server ......"
+echo "Waiting for 5 minutes for Ceph cluster bring up ..."
+sleep 600
+
+ceph_mon_ls="$(kubectl exec -ti -n rook-ceph $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') -- bash -c "cat /etc/ceph/ceph.conf | grep mon_host")"
+ceph_mon_ls="$(echo $ceph_mon_ls | cut -d "=" -f2)"
+ceph_mon_ls="$(echo ${ceph_mon_ls%?})"
+echo $ceph_mon_ls
+sed -i "s?monitors:.*?monitors: $ceph_mon_ls?" ceph-volume/storageclass.yaml
+
+kubectl exec -ti -n rook-ceph $(kubectl -n rook-ceph get pod -l "app=rook-ceph-operator" -o jsonpath='{.items[0].metadata.name}') -- bash -c "ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-or-create-key client.kube mon \"allow profile rbd\" osd \"profile rbd pool=rbd\""
+
+admin_secret="$(kubectl exec -ti -n rook-ceph $(kubectl -n rook-ceph get pod -l "app=rook-ceph-operator" -o jsonpath='{.items[0].metadata.name}') -- bash -c "ceph auth get-key client.admin|base64")"
+kube_secret="$(kubectl exec -ti -n rook-ceph $(kubectl -n rook-ceph get pod -l "app=rook-ceph-operator" -o jsonpath='{.items[0].metadata.name}') -- bash -c "ceph auth get-key client.kube|base64")"
+
+admin_secret="$(echo ${admin_secret%?})"
+kube_secret="$(echo ${kube_secret%?})"
+echo $admin_secret
+echo $kube_secret
+
+sed -i "s?admin:.*?admin: \"$admin_secret\"?" ceph-volume/secret.yaml
+sed -i "s?kube:.*?kube: \"$kube_secret\"?" ceph-volume/secret.yaml
+
+kubectl create -f ./ceph-volume/storageclass.yaml
+kubectl create -f ./ceph-volume/secret.yaml
+kubectl create -f ./ceph-volume/pvc.yaml
+
+# Create deployment of MinIO server
+kubectl create -f minio-deployment.yaml
+
+# Create service for MinIO
+kubectl create -f minio-service.yaml
+
diff --git a/deploy/kud-plugin-addons/minio/yaml/uninstall.sh b/deploy/kud-plugin-addons/minio/yaml/uninstall.sh
new file mode 100755 (executable)
index 0000000..fc127ba
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+ICN_DIR=$(dirname "$(dirname "$(dirname "$(dirname "$PWD")")")")
+
+# Make sure 64GB+ free space.
+
+#echo "s"|sudo -S mkdir /mnt/minio
+
+# Remove service for MinIO
+kubectl delete -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/minio-service.yaml
+
+# Remove deployment of MinIO server
+kubectl delete -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/minio-deployment.yaml
+
+# Remove persistent volume claim for minio server
+kubectl delete -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-pvc.yaml
+
+# Remove storage class for local-sc
+kubectl delete -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-sc.yaml
+
+# Remove the local-sc persistent volume (created manually, since local-sc does not support dynamic provisioning).
+kubectl delete -f $ICN_DIR/deploy/kud-plugin-addons/minio/yaml/local/local-pv.yaml