Merge "BPA Provisioning CRD and Controller" into dev/icn-v0.1.0
author    Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
          Fri, 16 Aug 2019 21:06:58 +0000 (21:06 +0000)
committer Gerrit Code Review <gerrit@akraino.org>
          Fri, 16 Aug 2019 21:06:58 +0000 (21:06 +0000)
30 files changed:
Makefile
deploy/kud-plugin-addons/rook/README.md
deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/install.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-common.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml [new file with mode: 0644]
deploy/metal3/scripts/metal3.sh [new file with mode: 0755]
env/01_install_package.sh [deleted file]
env/02_configure.sh [deleted file]
env/03_launch_prereq.sh [deleted file]
env/lib/common.sh [changed mode: 0644->0755]
env/lib/logging.sh [changed mode: 0644->0755]
env/metal3/01_install_package.sh [new file with mode: 0755]
env/metal3/02_configure.sh [new file with mode: 0755]
env/metal3/03_launch_prereq.sh [new file with mode: 0755]
env/ubuntu/bootloader-env/01_bootloader_package_req.sh [new file with mode: 0755]
env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh [new file with mode: 0755]

index e69de29..567ea2e 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -0,0 +1,12 @@
+SHELL:=/bin/bash
+BMDIR:=$(CURDIR)/env/metal3
+METAL3DIR:=$(CURDIR)/deploy/metal3/scripts
+all: bm_install
+
+bm_preinstall:
+       pushd $(BMDIR) && ./01_install_package.sh && ./02_configure.sh && ./03_launch_prereq.sh && popd
+
+bm_install:
+       pushd $(METAL3DIR) && ./metal3.sh && popd 
+
+.PHONY: all bm_preinstall bm_install
index e69de29..1ce151b 100644 (file)
--- a/deploy/kud-plugin-addons/rook/README.md
+++ b/deploy/kud-plugin-addons/rook/README.md
@@ -0,0 +1,147 @@
+## Intel Rook infrastructure for Ceph cluster deployment
+
+By default, OSDs are created on the directory /var/lib/rook/storage-dir, and
+Ceph cluster information is stored under /var/lib/rook.
+
+# Precondition
+
+1. Compute node disk space: at least 20 GB of free disk space.
+
+2. Kubernetes version: Ceph CSI v1.0 requires Kubernetes >= 1.13.
+The following upgrade patch applies to the kud repository: https://github.com/onap/multicloud-k8s
+
+```
+$ git diff
+diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
+index 9b36547..5c29fa4 100644
+--- a/kud/deployment_infra/playbooks/kud-vars.yml
++++ b/kud/deployment_infra/playbooks/kud-vars.yml
+@@ -58,7 +58,7 @@ ovn4nfv_version: adc7b2d430c44aa4137ac7f9420e14cfce3fa354
+ ovn4nfv_url: "https://git.opnfv.org/ovn4nfv-k8s-plugin/"
+
+ go_version: '1.12.5'
+-kubespray_version: 2.8.2
+-helm_client_version: 2.9.1
++kubespray_version: 2.9.0
++helm_client_version: 2.13.1
+ # kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
+ ansible_version: 2.7.10
+diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+index 9966ba8..cacb4b3 100644
+--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
++++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+@@ -48,7 +48,7 @@ local_volumes_enabled: true
+ local_volume_provisioner_enabled: true
+
+ ## Change this to use another Kubernetes version, e.g. a current beta release
+-kube_version: v1.12.3
++kube_version: v1.13.5
+
+ # Helm deployment
+ helm_enabled: true
+```
+
+After the upgrade, the Kubernetes version should be as follows:
+```
+$ kubectl version
+Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+```
+
+If the kubectl server version is still wrong, you can upgrade it manually with
+the following command:
+```console
+$ kubeadm upgrade apply v1.13.5
+```
+
+# Deployment
+
+To bring up the Rook operator (v1.0) and the Ceph cluster (Mimic 13.2.2), run:
+
+```console
+cd yaml
+./install.sh
+```
+
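+To verify the deployment (a sketch, assuming the default rook-ceph namespace and
+the toolbox deployed by rook-toolbox.yaml), watch the pods come up and query the
+cluster health from the toolbox pod:
+
+```console
+kubectl -n rook-ceph get pods -w
+kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" \
+    -o jsonpath='{.items[0].metadata.name}') -- ceph status
+```
+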
+# Test
+
+To test the sample Ceph workload, proceed as follows:
+
+1. Bring up the Rook operator and Ceph cluster as described in the Deployment section.
+2. Create the storage class:
+
+```console
+kubectl create -f ./test/rbd/storageclass.yaml
+```
+
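+   As an optional check (sketch), confirm that the storage class and the backing
+   CephBlockPool named `rbd` were created:
+```console
+kubectl get storageclass csi-rbd
+kubectl -n rook-ceph get cephblockpool rbd
+```
+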
+3. Create RBD secret.
+```console
+kubectl exec -ti -n rook-ceph rook-ceph-operator-948f8f84c-749zb -- bash -c \
+"ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-or-create-key client.kube mon \"allow profile rbd\" osd \"profile rbd pool=rbd\""
+```
+   Replace the pod name with your own rook-ceph-operator pod; see `kubectl get pod -n rook-ceph`.
+   Then get the secret keys for the admin and client users by entering the operator pod and executing:
+```console
+ceph auth get-key client.admin|base64
+ceph auth get-key client.kube|base64
+```
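+   Alternatively (a convenience sketch, assuming the toolbox from rook-toolbox.yaml
+   is running), both keys can be fetched and base64-encoded without copying the
+   operator pod name by hand:
+```console
+TOOLS_POD=$(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}')
+kubectl -n rook-ceph exec $TOOLS_POD -- ceph auth get-key client.admin | base64
+kubectl -n rook-ceph exec $TOOLS_POD -- ceph auth get-key client.kube | base64
+```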
+  Then fill the keys into secret.yaml and create the secret:
+```console
+kubectl create -f ./test/rbd/secret.yaml
+```
+4. Create RBD Persistent Volume Claim
+```console
+kubectl create -f ./test/rbd/pvc.yaml
+```
+5. Create RBD demo pod
+```console
+kubectl create -f ./test/rbd/pod.yaml
+```
+6. Check the volumes created and the application mount status
+```console
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl get pvc
+NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc   Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        84m
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl get pod
+NAME              READY   STATUS    RESTARTS   AGE
+csirbd-demo-pod   1/1     Running   0          84m
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl exec -ti csirbd-demo-pod -- bash
+root@csirbd-demo-pod:/# df -h
+Filesystem      Size  Used Avail Use% Mounted on
+overlay         733G   35G  662G   5% /
+tmpfs            64M     0   64M   0% /dev
+tmpfs            32G     0   32G   0% /sys/fs/cgroup
+/dev/sda2       733G   35G  662G   5% /etc/hosts
+shm              64M     0   64M   0% /dev/shm
+/dev/rbd0       976M  2.6M  958M   1% /var/lib/www/html
+tmpfs            32G   12K   32G   1% /run/secrets/kubernetes.io/serviceaccount
+tmpfs            32G     0   32G   0% /proc/acpi
+tmpfs            32G     0   32G   0% /proc/scsi
+tmpfs            32G     0   32G   0% /sys/firmware
+```
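+   As an optional sanity check (sketch), write a file into the RBD-backed mount and
+   read it back from inside the demo pod:
+```console
+kubectl exec -ti csirbd-demo-pod -- bash -c "echo hello > /var/lib/www/html/test.txt && cat /var/lib/www/html/test.txt"
+```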
+7. Create RBD snapshot-class
+```console
+kubectl create -f ./test/rbd/snapshotclass.yaml
+```
+8. Create Volume snapshot and verify
+```console
+kubectl create -f ./test/rbd/snapshot.yaml
+
+$ kubectl get volumesnapshotclass
+NAME                      AGE
+csi-rbdplugin-snapclass   51s
+$ kubectl get volumesnapshot
+NAME               AGE
+rbd-pvc-snapshot   33s
+
+```
+9. Restore the snapshot to a new PVC and verify
+```console
+kubectl create -f ./test/rbd/pvc-restore.yaml
+
+$ kubectl get pvc
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc           Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        42h
+rbd-pvc-restore   Bound    pvc-530a4939-e4c0-428d-a072-c9c39d110d7a   1Gi        RWO            csi-rbd        5s
+```
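+
+To confirm that the restored volume carries the snapshot data (a sketch reusing the
+demo pod manifest; the new pod name is arbitrary), point a copy of pod.yaml at the
+restored claim and, once the pod is Running, check the data written earlier:
+```console
+sed -e 's/name: csirbd-demo-pod/name: csirbd-restore-pod/' \
+    -e 's/claimName: rbd-pvc$/claimName: rbd-pvc-restore/' ./test/rbd/pod.yaml | kubectl create -f -
+kubectl exec -ti csirbd-restore-pod -- cat /var/lib/www/html/test.txt
+```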
+
diff --git a/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh b/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh
new file mode 100755 (executable)
index 0000000..1499c3f
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# usage: collect_rook_yaml.sh [target]
+
+set -ex
+
+if [ $# -ne 1 ] ; then
+    echo "Please input the target folder!"
+    exit 0
+fi
+
+VER="0.1"
+MKDIR_P="mkdir -p"
+target=$1
+temp=rook_yaml
+
+# copy to target
+$MKDIR_P $temp
+cp rook-common.yaml $temp/
+cp rook-operator-with-csi.yaml $temp/
+cp rook-ceph-cluster.yaml $temp/
+cp rook-toolbox.yaml $temp/
+cp -rf ./csi/ $temp/
+cp -rf ./test/ $temp/
+cp install.sh $temp/
+
+if [ ! -d $target/yaml ]; then
+    $MKDIR_P $target/yaml;
+fi;
+
+tar czvf $target/yaml/rook_yaml-$VER.tar.gz $temp/
+
+# clear
+rm -rf $temp
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..5fb0bb1
--- /dev/null
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..fdcc18b
--- /dev/null
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..d37d0cc
--- /dev/null
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..028d7bd
--- /dev/null
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/install.sh b/deploy/kud-plugin-addons/rook/yaml/install.sh
new file mode 100755 (executable)
index 0000000..00b9e04
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Make sure kubernetes server is up with network dns
+# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
+
+# Remove the master taint if present
+# kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-
+
+# Remove remaining config files from the last deployment
+echo ""|sudo -S rm -rf /var/lib/rook/*
+
+# Create common CRD objects
+kubectl create -f rook-common.yaml
+
+# Create rbac, since rook operator is not permitted to create rbac rules, these
+# rules have to be created outside of operator
+kubectl apply -f ./csi/rbac/rbd/
+kubectl apply -f ./csi/rbac/cephfs/
+
+# Start rook ceph operator with csi support
+kubectl create -f rook-operator-with-csi.yaml
+
+# Bring up cluster with default configuration, current Ceph version is:
+# ceph/ceph:v14.2.1-20190430, and create osd with default /dev/sdb on each node
+kubectl create -f rook-ceph-cluster.yaml
+
+# Start toolbox containers with CLI support, to enter the bash env, use command:
+# kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash
+kubectl create -f rook-toolbox.yaml
+
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml
new file mode 100644 (file)
index 0000000..0e1ffba
--- /dev/null
@@ -0,0 +1,125 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a production cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
+# in this example. See the documentation for more details on storage settings available.
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: ceph/ceph:v13.2.2-20190410
+    # Whether to allow unsupported versions of Ceph. Currently luminous, mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: /var/lib/rook
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    # ssl: true
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: false
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    deviceFilter:
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      metadataDevice: # "md0" specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+      databaseSizeMB: "10240" # uncomment if the disks are smaller than 100 GB
+      journalSizeMB: "10240"  # uncomment if the disks are 20 GB or smaller
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # encryptedDevice: "true" # the default value for this option is "false"
+# Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
+    directories:
+    - path: "/var/lib/rook/storage-dir"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources.  Each node's 'name' field should match their 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: "^sd."
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml
new file mode 100644 (file)
index 0000000..e6366a0
--- /dev/null
@@ -0,0 +1,618 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+---
+# The CRD declarations
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+                name:
+                  pattern: ^(luminous|mimic|nautilus)$
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 1
+                  type: integer
+                preferredCount:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+              required:
+              - count
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                nodes:
+                  items: {}
+                  type: array
+                useAllDevices: {}
+                useAllNodes:
+                  type: boolean
+          required:
+          - mon
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  additionalPrinterColumns:
+    - name: MdsCount
+      type: string
+      description: Number of MDSs
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: rook-ceph
+  labels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+  # Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml
new file mode 100644 (file)
index 0000000..c34b879
--- /dev/null
@@ -0,0 +1,73 @@
+#################################################################################################################
+# The deployment for the rook operator that enables the ceph-csi driver for beta testing.
+# For example, to create the rook-ceph cluster, apply rook-common.yaml, the CSI RBAC under ./csi/rbac/,
+# this operator deployment, and then rook-ceph-cluster.yaml (see install.sh for the exact sequence).
+#################################################################################################################
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: rook/ceph:v1.0.4
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "true"
+        # CSI enablement
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+        - name: ROOK_CSI_CEPHFS_IMAGE
+          value: "quay.io/cephcsi/cephfsplugin:v1.0.0"
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_RBD_IMAGE
+          value: "quay.io/cephcsi/rbdplugin:v1.0.0"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "quay.io/k8scsi/csi-node-driver-registrar:v1.0.2"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "quay.io/k8scsi/csi-provisioner:v1.0.1"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "quay.io/k8scsi/csi-snapshotter:v1.0.1"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "quay.io/k8scsi/csi-attacher:v1.0.1"
+        # The name of the node to pass with the downward API
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml
new file mode 100644 (file)
index 0000000..de442f0
--- /dev/null
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: rook-ceph
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: rook/ceph:v1.0.4
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml
new file mode 100644 (file)
index 0000000..3a75fb9
--- /dev/null
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: csirbd-demo-pod
+spec:
+  containers:
+   - name: web-server
+     image: nginx
+     volumeMounts:
+       - name: mypvc
+         mountPath: /var/lib/www/html
+  volumes:
+   - name: mypvc
+     persistentVolumeClaim:
+       claimName: rbd-pvc
+       readOnly: false
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml
new file mode 100644 (file)
index 0000000..1fc02d5
--- /dev/null
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc-restore
+spec:
+  storageClassName: csi-rbd
+  dataSource:
+    name: rbd-pvc-snapshot
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml
new file mode 100644 (file)
index 0000000..3115642
--- /dev/null
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml
new file mode 100644 (file)
index 0000000..89c8fe5
--- /dev/null
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi-rbd-secret
+  namespace: default
+data:
+  # Key value corresponds to a user name defined in Ceph cluster
+  admin: "QVFCQzExQmRuTVp0RVJBQW9FWDJmQ1RkTFQ1QWZ4SlU0OHFLc3c9PQ=="
+  # Key value corresponds to a user name defined in Ceph cluster
+  kube: "QVFBOHJGTmRzeDluQ3hBQW1zRXJkT3gybWYyTTQxTzVidG9ONlE9PQ=="
+  # if monValueFromSecret is set to "monitors", uncomment the
+  # following and set the mon there
+  #monitors: BASE64-ENCODED-Comma-Delimited-Mons
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml
new file mode 100644 (file)
index 0000000..f8ba153
--- /dev/null
@@ -0,0 +1,10 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshot
+metadata:
+  name: rbd-pvc-snapshot
+spec:
+  snapshotClassName: csi-rbdplugin-snapclass
+  source:
+    name: rbd-pvc
+    kind: PersistentVolumeClaim
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml
new file mode 100644 (file)
index 0000000..03e52da
--- /dev/null
@@ -0,0 +1,11 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-rbdplugin-snapclass
+snapshotter: rbd.csi.ceph.com
+parameters:
+  pool: rbd
+  monitors: 10.111.122.22:6789/0,10.104.227.175:6789/0,10.98.129.229:6789/0
+  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+  csi.storage.k8s.io/snapshotter-secret-namespace: default
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml
new file mode 100644 (file)
index 0000000..ae8c30d
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: rbd
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 3
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+   name: csi-rbd
+provisioner: rbd.csi.ceph.com
+parameters:
+    # Comma separated list of Ceph monitors
+    # if using FQDN, make sure csi plugin's dns policy is appropriate.
+    monitors: 10.233.47.29:6789/0,10.233.23.25:6789/0,10.233.48.241:6789/0
+
+    # if "monitors" parameter is not set, driver to get monitors from same
+    # secret as admin/user credentials. "monValueFromSecret" provides the
+    # key in the secret whose value is the mons
+    #monValueFromSecret: "monitors"
+    
+    # Ceph pool into which the RBD image shall be created
+    pool: rbd
+
+    # RBD image format. Defaults to "2".
+    imageFormat: "2"
+
+    # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+    imageFeatures: layering
+    
+    # The secrets have to contain Ceph admin credentials.
+    csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/provisioner-secret-namespace: default
+    csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/node-publish-secret-namespace: default
+
+    # Ceph users for operating RBD
+    adminid: admin
+    userid: kube
+    # uncomment the following to use rbd-nbd as mounter on supported nodes
+    #mounter: rbd-nbd
+reclaimPolicy: Delete
diff --git a/deploy/metal3/scripts/metal3.sh b/deploy/metal3/scripts/metal3.sh
new file mode 100755 (executable)
index 0000000..f268d8b
--- /dev/null
@@ -0,0 +1,167 @@
+#!/bin/bash
+set -ex
+
+LIBDIR="$(dirname "$(dirname "$(dirname "$PWD")")")"
+
+eval "$(go env)"
+
+BM_OPERATOR="${BM_OPERATOR:-https://github.com/metal3-io/baremetal-operator.git}"
+
+source $LIBDIR/env/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+function get_default_inteface_ipaddress() {
+    local _ip=$1
+    local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
+    local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+    eval $_ip="'$_ipv4address'"
+}
+
+create_ssh_key() {
+       #ssh key for compute node to communicate back to bootstrap server
+       mkdir -p $BUILD_DIR/ssh_key
+       ssh-keygen -C "compute.icn.akraino.lfedge.org" -f $BUILD_DIR/ssh_key/id_rsa
+       cat $BUILD_DIR/ssh_key/id_rsa.pub >> $HOME/.ssh/authorized_keys
+}
+
+set_compute_key() {
+_SSH_LOCAL_KEY=$(cat $BUILD_DIR/ssh_key/id_rsa)
+cat << EOF
+write_files:
+- path: /opt/ssh_id_rsa
+  owner: root:root
+  permissions: '0600'
+  content: |
+    $_SSH_LOCAL_KEY
+EOF
+}
+
+provision_compute_node() {
+       IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
+       IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
+
+       if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+               go get github.com/metal3-io/baremetal-operator
+       fi
+
+       go run $GOPATH/src/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go \
+           -address "ipmi://$COMPUTE_IPMI_ADDRESS" \
+                  -user "$COMPUTE_IPMI_USER" \
+           -password "$COMPUTE_IPMI_PASSWORD" \
+           "$COMPUTE_NODE_NAME" > $COMPUTE_NODE_NAME-bm-node.yaml
+
+       printf "  image:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    url: ""%s" "$IMAGE_URL" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    checksum: ""%s" "$IMAGE_CHECKSUM" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n  userData:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    name: ""%s" "$COMPUTE_NODE_NAME""-user-data" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    namespace: metal3\n" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       kubectl apply -f $COMPUTE_NODE_NAME-bm-node.yaml
+}
+
+deprovision_compute_node() {
+       kubectl patch baremetalhost $COMPUTE_NODE_NAME -n metal3 --type merge \
+    -p '{"spec":{"image":{"url":"","checksum":""}}}'
+}
+
+set_compute_ssh_config() {
+get_default_inteface_ipaddress default_addr
+cat << EOF
+- path: /root/.ssh/config
+  owner: root:root
+  permissions: '0600'
+  content: |
+    Host bootstrapmachine $default_addr
+    HostName $default_addr
+    IdentityFile /opt/ssh_id_rsa
+    User $USER
+- path: /etc/apt/sources.list
+  owner: root:root
+  permissions: '0665'
+  content: |
+       deb [trusted=yes] ssh://$USER@$default_addr:$LOCAL_APT_REPO ./
+EOF
+}
+
+create_userdata() {
+       printf "#cloud-config\n" > userdata.yaml
+       if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
+               printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >> userdata.yaml
+               printf "\nchpasswd: {expire: False}\n" >> userdata.yaml
+               printf "ssh_pwauth: True\n" >> userdata.yaml
+       fi
+
+       if [ -n "$COMPUTE_NODE_FQDN" ]; then
+               printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >> userdata.yaml
+               printf "\n" >> userdata.yaml
+       fi
+
+       printf "ssh_authorized_keys:\n  - " >> userdata.yaml
+
+       # Generate a key pair if one does not exist yet; its public key is appended below
+       if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
+               yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
+       fi
+
+       cat $HOME/.ssh/id_rsa.pub >> userdata.yaml
+       printf "\n" >> userdata.yaml
+}
+
+apply_userdata_credential() {
+       cat <<EOF > ./$COMPUTE_NODE_NAME-user-data.yaml
+apiVersion: v1
+data:
+  userData: $(base64 -w 0 userdata.yaml)
+kind: Secret
+metadata:
+  name: $COMPUTE_NODE_NAME-user-data
+  namespace: metal3
+type: Opaque
+EOF
+       kubectl apply -n metal3 -f $COMPUTE_NODE_NAME-user-data.yaml
+}
+
+launch_baremetal_operator() {
+       if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+        go get github.com/metal3-io/baremetal-operator
+    fi
+
+       pushd $GOPATH/src/github.com/metal3-io/baremetal-operator
+               make deploy
+       popd
+               
+}
+
+if [ "$1" == "launch" ]; then
+    launch_baremetal_operator
+    exit 0
+fi
+
+if [ "$1" == "deprovision" ]; then
+    deprovision_compute_node
+    exit 0
+fi
+
+if [ "$1" == "provision" ]; then
+    create_userdata
+       apply_userdata_credential
+       provision_compute_node
+    exit 0
+fi
+
+
+echo "Usage: metal3.sh"
+echo "launch      - Launch the metal3 operator"
+echo "provision   - provision baremetal node as specified in common.sh"
+echo "deprovision - deprovision baremetal node as specified in common.sh"
+exit 1
+
+#Following code is tested for the offline mode
+#Will be integrated for the offline mode in ICN v0.1.0 beta
+#create_ssh_key
+#create_userdata
+#set_compute_key
+#set_compute_ssh_config
diff --git a/env/01_install_package.sh b/env/01_install_package.sh
deleted file mode 100644 (file)
index 3e369c3..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-set -ex
-
-
diff --git a/env/02_configure.sh b/env/02_configure.sh
deleted file mode 100644 (file)
index c1ddb47..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-set -xe
diff --git a/env/03_launch_prereq.sh b/env/03_launch_prereq.sh
deleted file mode 100644 (file)
index d2577bb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-set -xe
diff --git a/env/lib/common.sh b/env/lib/common.sh
old mode 100644 (file)
new mode 100755 (executable)
index e69de29..0d589a5
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+#supported OS version
+UBUNTU_BIONIC=${UBUNTU_BIONIC:-Ubuntu 18.04.2 LTS}
+
+#offline mode variable
+DOWNLOAD_PATH=${DOWNLOAD_PATH:-/opt/icn/}
+LOCAL_APT_REPO=${LOCAL_APT_REPO:-$DOWNLOAD_PATH/apt}
+PIP_CACHE_DIR=${PIP_CACHE_DIR:-$DOWNLOAD_PATH/pip-cache-dir}
+BUILD_DIR=${BUILD_DIR:-$DOWNLOAD_PATH/build-dir}
+CONTAINER_IMAGES_DIR=${CONTAINER_IMAGES_DIR:-$OFFLINE_DOWNLOAD_PATH/docker-dir}
+
+#set variables
+#Todo include over all variables here
+KUBE_VERSION=${KUBE_VERSION:-"v1.15.0"}
+POD_NETWORK_CIDR=${POD_NETWORK_CIDR:-"10.244.0.0/16"}
+PODMAN_CNI_CONFLIST=${PODMAN_CNI_CONFLIST:-"https://raw.githubusercontent.com/containers/libpod/v1.4.4/cni/87-podman-bridge.conflist"}
+
+#Bootstrap K8s cluster
+
+
+#Ironic variables
+IRONIC_IMAGE=${IRONIC_IMAGE:-"quay.io/metal3-io/ironic:master"}
+IRONIC_INSPECTOR_IMAGE=${IRONIC_INSPECTOR_IMAGE:-"quay.io/metal3-io/ironic-inspector"}
+IRONIC_BAREMETAL_IMAGE=${IRONIC_BAREMETAL_IMAGE:-"quay.io/metal3-io/baremetal-operator:master"}
+IRONIC_BAREMETAL_SOCAT_IMAGE=${IRONIC_BAREMETAL_SOCAT_IMAGE:-"alpine/socat:latest"}
+
+IRONIC_DATA_DIR=${IRONIC_DATA_DIR:-"/opt/ironic"}
+#IRONIC_PROVISIONING_INTERFACE is required to be provisioning, don't change it
+IRONIC_PROVISIONING_INTERFACE=${IRONIC_PROVISIONING_INTERFACE:-"provisioning"}
+IRONIC_IPMI_INTERFACE=${IRONIC_IPMI_INTERFACE:-"eno1"}
+IRONIC_PROVISIONING_INTERFACE_IP=${IRONIC_PROVISIONING_INTERFACE_IP:-"172.22.0.1"}
+IRONIC_IPMI_INTERFACE_IP=${IRONIC_IPMI_INTERFACE_IP:-"172.31.1.9"}
+BM_IMAGE_URL=${BM_IMAGE_URL:-"https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"}
+BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
+
+#Todo change into nodes list in json pattern
+COMPUTE_NODE_NAME=${COMPUTE_NODE_NAME:-"el-100-node-01"}
+COMPUTE_IPMI_ADDRESS=${COMPUTE_IPMI_ADDRESS:-"172.31.1.17"}
+COMPUTE_IPMI_USER=${COMPUTE_IPMI_USER:-"ryeleswa"}
+COMPUTE_IPMI_PASSWORD=${COMPUTE_IPMI_PASSWORD:-"changeme1"}
+COMPUTE_NODE_FQDN=${COMPUTE_NODE_FQDN:-"node01.akraino.org"}
+#COMPUTE_NODE_HOSTNAME=${COMPUTE_NODE_HOSTNAME:-"node01"}
+COMPUTE_NODE_PASSWORD=${COMPUTE_NODE_PASSWORD:-"mypasswd"}
diff --git a/env/lib/logging.sh b/env/lib/logging.sh
old mode 100644 (file)
new mode 100755 (executable)
index e69de29..40a29f8
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Log all output of the calling script automatically
+# (adapted from the metal3 project)
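+# When sourced, everything the calling script writes to stdout/stderr is also captured in $LOGFILE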
+LOGDIR="$(dirname $0)/logs"
+if [ ! -d "$LOGDIR" ]; then
+    mkdir -p "$LOGDIR"
+fi
+LOGFILE="$LOGDIR/$(basename $0 .sh)-$(date +%F-%H%M%S).log"
+exec 1> >( tee "${LOGFILE}" ) 2>&1
diff --git a/env/metal3/01_install_package.sh b/env/metal3/01_install_package.sh
new file mode 100755 (executable)
index 0000000..36beb04
--- /dev/null
@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+set -ex
+
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/common.sh
+source $LIBDIR/lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+function install_essential_packages() {
+    apt-get update
+    apt-get -y install \
+               crudini \
+               curl \
+               dnsmasq \
+               figlet \
+               golang \
+               nmap \
+               patch \
+               psmisc \
+               python-pip \
+               python-requests \
+               python-setuptools \
+               vim \
+               wget
+}
+
+function install_ironic_packages() {
+    apt-get update
+    apt-get -y install \
+               jq \
+               nodejs \
+               python-ironicclient \
+               python-ironic-inspector-client \
+               python-lxml \
+               python-netaddr \
+               python-openstackclient \
+               unzip \
+               genisoimage
+
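+       # In offline mode, install python packages from the local pip cache prepared by the bootloader-env download step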
+       if [ "$1" == "offline" ]; then
+               pip install --no-index \
+                       --find-links=file:$PIP_CACHE_DIR lolcat yq
+               return
+       fi
+
+    pip install \
+               lolcat \
+               yq
+}
+
+function install_docker_packages() {
+    apt-get remove -y docker \
+               docker-engine \
+               docker.io \
+               containerd \
+               runc
+    apt-get update
+    apt-get -y install \
+               apt-transport-https \
+               ca-certificates \
+               curl \
+               gnupg-agent \
+               software-properties-common
+       if [ "$1" != "offline" ]; then
+               curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+               add-apt-repository \
+                       "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+                       $(lsb_release -cs) \
+                       stable"
+               apt-get update
+       fi
+    apt-get -y install docker-ce=18.06.0~ce~3-0~ubuntu
+}
+
+function install_podman_packages() {
+       if [ "$1" != "offline" ]; then
+               add-apt-repository -y ppa:projectatomic/ppa
+               apt-get update
+       fi
+    apt-get -y install podman
+}
+
+function install_kubernetes_packages() {
+       if [ "$1" != "offline" ]; then
+               curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+               bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+               apt-get update
+       fi
+       apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
+       apt-mark hold kubelet kubeadm kubectl
+}
+
+install() {
+       install_essential_packages
+       install_ironic_packages $1
+       install_docker_packages $1
+       install_podman_packages $1
+       install_kubernetes_packages $1
+}
+
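+# "-o" installs in offline mode, using the packages and images prepared by the bootloader-env scripts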
+if [ "$1" == "-o" ]; then
+       install offline
+       exit 0
+fi
+
+install
diff --git a/env/metal3/02_configure.sh b/env/metal3/02_configure.sh
new file mode 100755 (executable)
index 0000000..15864d6
--- /dev/null
@@ -0,0 +1,150 @@
+#!/usr/bin/env bash
+set -xe
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/logging.sh
+source $LIBDIR/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "configure script must be run as root"
+    exit 1
+fi
+
+function check_inteface_ip() {
+       local interface=$1
+       local ipaddr=$2
+
+    if ! ip addr show dev $interface > /dev/null 2>&1; then
+        exit 1
+    fi
+
+    local ipv4address=$(ip addr show dev $interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+    if [ "$ipv4address" != "$ipaddr" ]; then
+        exit 1
+    fi
+}
+
+function configure_kubelet() {
+       swapoff -a
+       #TODO: additional kubelet configuration
+}
+
+function configure_kubeadm() {
+       #TODO: add error handling
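+       # In offline mode, load the pre-saved Kubernetes images instead of pulling them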
+       if [ "$1" == "offline" ]; then
+               for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+                       docker load --input $CONTAINER_IMAGES_DIR/$images.tar
+               done
+
+               docker load --input $CONTAINER_IMAGES_DIR/pause.tar
+               docker load --input $CONTAINER_IMAGES_DIR/etcd.tar
+               docker load --input $CONTAINER_IMAGES_DIR/coredns.tar
+               return
+       fi
+       kubeadm config images pull --kubernetes-version=$KUBE_VERSION
+}
+
+function configure_ironic_interfaces() {
+       #Todo later to change the CNI networking for podman networking
+       # Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
+       if [ "$IRONIC_PROVISIONING_INTERFACE" ]; then
+               check_inteface_ip $IRONIC_PROVISIONING_INTERFACE $IRONIC_PROVISIONING_INTERFACE_IP
+       else
+               exit 1
+       fi
+
+       if [ "$IRONIC_IPMI_INTERFACE" ]; then
+        check_inteface_ip $IRONIC_IPMI_INTERFACE $IRONIC_IPMI_INTERFACE_IP
+    else
+        exit 1
+    fi
+
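+       # Open httpd (80), Ironic Inspector (5050) and the Ironic API (6385) to the provisioning network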
+       for port in 80 5050 6385 ; do
+               if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
+                       sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT
+               fi
+       done
+
+       # Allow ipmi to the bmc processes
+       if ! sudo iptables -C INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
+               sudo iptables -I INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT
+       fi
+
+       #Allow access to dhcp and tftp server for pxeboot
+       for port in 67 69 ; do
+               if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT 2>/dev/null ; then
+                       sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT
+               fi
+       done
+}
+
+function configure_ironic_offline() {
+       if [ ! -d $CONTAINER_IMAGES_DIR ] || [ ! -d $BUILD_DIR ]; then
+               exit 1
+       fi
+
+       for image in ironic-inspector-image ironic-image podman-pause \
+               baremetal-operator socat; do
+               if [ ! -f "$CONTAINER_IMAGES_DIR/$image.tar" ]; then
+                       exit 1
+               fi
+       done
+
+       if [ ! -f "$BUILD_DIR/ironic-python-agent.initramfs" ] || [ ! -f \
+               "$BUILD_DIR/ironic-python-agent.kernel" ] || [ ! -f \
+               "$BUILD_DIR/$BM_IMAGE" ]; then
+               exit 1
+       fi
+
+       podman load --input $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar
+       podman load --input $CONTAINER_IMAGES_DIR/ironic-image.tar
+       podman load --input $CONTAINER_IMAGES_DIR/podman-pause.tar
+
+       docker load --input $CONTAINER_IMAGES_DIR/baremetal-operator.tar
+       docker load --input $CONTAINER_IMAGES_DIR/socat.tar
+
+       mkdir -p "$IRONIC_DATA_DIR/html/images"
+
+       cp $BUILD_DIR/ironic-python-agent.initramfs $IRONIC_DATA_DIR/html/images/
+       cp $BUILD_DIR/ironic-python-agent.kernel $IRONIC_DATA_DIR/html/images/
+       cp $BUILD_DIR/$BM_IMAGE $IRONIC_DATA_DIR/html/images/
+       md5sum $IRONIC_DATA_DIR/html/images/$BM_IMAGE | awk '{print $1}' > $IRONIC_DATA_DIR/html/images/${BM_IMAGE}.md5sum
+}
+
+function configure_ironic() {
+       if [ "$1" == "offline" ]; then
+               configure_ironic_offline
+               return
+       fi
+
+       podman pull $IRONIC_IMAGE
+       podman pull $IRONIC_INSPECTOR_IMAGE
+
+       mkdir -p "$IRONIC_DATA_DIR/html/images"
+       pushd $IRONIC_DATA_DIR/html/images
+
+       if [ ! -f ironic-python-agent.initramfs ]; then
+               curl --insecure --compressed -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+       fi
+
+       if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+               curl -o ${BM_IMAGE} --insecure --compressed -L ${BM_IMAGE_URL}
+               md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+       fi
+       popd
+}
+
+function configure() {
+       configure_kubeadm $1
+       configure_kubelet
+       configure_ironic_interfaces
+       configure_ironic $1
+}
+
+if [ "$1" == "-o" ]; then
+    configure offline
+    exit 0
+fi
+
+configure
diff --git a/env/metal3/03_launch_prereq.sh b/env/metal3/03_launch_prereq.sh
new file mode 100755 (executable)
index 0000000..95c17f0
--- /dev/null
@@ -0,0 +1,143 @@
+#!/bin/bash
+set -xe
+
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/logging.sh
+source $LIBDIR/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "launch script must be run as root"
+    exit 1
+fi
+
+function get_default_inteface_ipaddress() {
+       local _ip=$1
+       local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
+       local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+       eval $_ip="'$_ipv4address'"
+}
+
+
+
+function check_cni_network() {
+       #Since the bootstrap cluster is a single-node cluster, podman and the
+       #bootstrap cluster share the same network configuration to avoid CNI config conflicts
+       if [ ! -d "/etc/cni/net.d" ]; then
+               mkdir -p "/etc/cni/net.d"
+       fi
+
+       if [ ! -f "/etc/cni/net.d/87-podman-bridge.conflist" ]; then
+               if [ "$1" == "offline" ]; then
+                       cp $BUILD_DIR/87-podman-bridge.conflist /etc/cni/net.d/
+                       return
+               fi
+
+               if ! wget $PODMAN_CNI_CONFLIST -P /etc/cni/net.d/; then
+                       exit 1
+               fi
+       fi
+}
+
+function create_k8s_regular_user() {
+       if [ ! -d "$HOME/.kube" ]; then
+               mkdir -p $HOME/.kube
+       fi
+
+       if [ ! -f /etc/kubernetes/admin.conf ]; then
+               exit 1
+       fi
+
+       cp -rf /etc/kubernetes/admin.conf $HOME/.kube/config
+       chown $(id -u):$(id -g) $HOME/.kube/config
+}
+
+function check_k8s_node_status(){
+       echo 'checking bootstrap cluster single node status'
+       node_status="False"
+
+       for i in {1..5}; do
+               check_node=$(kubectl get node -o \
+                       jsonpath='{.items[0].status.conditions[?(@.reason == "KubeletReady")].status}')
+               if [ "$check_node" != "" ]; then
+                       node_status=${check_node}
+               fi
+
+               if [ "$node_status" == "True" ]; then
+                       break
+               fi
+
+               sleep 3
+       done
+
+       if [ "$node_status" != "True" ]; then
+               echo "bootstrap cluster single node status is not ready"
+               exit 1
+       fi
+}
+
+function install_podman() {
+       # set password for mariadb
+       mariadb_password=$(echo $(date;hostname)|sha256sum |cut -c-20)
+
+       # Create pod
+       podman pod create -n ironic-pod
+
+       # Start the dnsmasq, httpd, mariadb, and ironic containers using the same image
+       podman run -d --net host --privileged --name dnsmasq  --pod ironic-pod \
+               -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/rundnsmasq ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name httpd --pod ironic-pod \
+       -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runhttpd ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name mariadb --pod ironic-pod \
+       -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runmariadb \
+       --env MARIADB_PASSWORD=$mariadb_password ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name ironic --pod ironic-pod \
+       --env MARIADB_PASSWORD=$mariadb_password \
+       -v $IRONIC_DATA_DIR:/shared ${IRONIC_IMAGE}
+
+       # Start Ironic Inspector
+       podman run -d --net host --privileged --name ironic-inspector \
+               --pod ironic-pod "${IRONIC_INSPECTOR_IMAGE}"
+}
+
+function remove_k8s_noschedule_taint() {
+       #Bootstrap cluster is a single node
+       nodename=$(kubectl get node -o jsonpath='{.items[0].metadata.name}')
+       if !(kubectl taint node $nodename node-role.kubernetes.io/master:NoSchedule-); then
+               exit 1
+       fi
+}
+
+function install_k8s_single_node() {
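+       # Bootstrap a single-node cluster, advertising the API server on the default-route interface address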
+       get_default_inteface_ipaddress apiserver_advertise_addr
+       kubeadm_init="kubeadm init --kubernetes-version=$KUBE_VERSION \
+                                       --pod-network-cidr=$POD_NETWORK_CIDR \
+                                       --apiserver-advertise-address=$apiserver_advertise_addr"
+       if !(${kubeadm_init}); then
+               exit 1
+       fi
+}
+
+function install() {
+       #install_kubernetes
+       install_k8s_single_node
+       check_cni_network $1
+       create_k8s_regular_user
+       check_k8s_node_status
+       remove_k8s_noschedule_taint
+
+       #install_podman
+       #TODO: add an error-handling mechanism
+       install_podman
+}
+
+if [ "$1" == "-o" ]; then
+    install offline
+    exit 0
+fi
+
+install
diff --git a/env/ubuntu/bootloader-env/01_bootloader_package_req.sh b/env/ubuntu/bootloader-env/01_bootloader_package_req.sh
new file mode 100755 (executable)
index 0000000..793fce1
--- /dev/null
@@ -0,0 +1,294 @@
+#!/usr/bin/env bash
+set -ex
+shopt -s extglob
+
+source $(dirname $PWD)/../lib/common.sh
+source $(dirname $PWD)/../lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+if [[ $(lsb_release -d | cut -f2) != $UBUNTU_BIONIC ]]; then
+    echo "Currently only Ubuntu 18.04.2 LTS is supported"
+    exit 1
+fi
+
+function download_essential_packages() {
+    apt-get update
+       for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+                       python-pip python-requests python-setuptools vim wget; do
+       apt-get -d install $package -y
+       done
+}
+
+function build_baremetal_operator_images() {
+       if [ ! -d "$BUILD_DIR/baremetal-operator" ]; then
+               return
+       fi
+
+       pushd $BUILD_DIR/baremetal-operator
+       docker build -t $IRONIC_BAREMETAL_IMAGE . -f build/Dockerfile
+       docker save --output \
+               $CONTAINER_IMAGES_DIR/baremetal-operator.tar $IRONIC_BAREMETAL_IMAGE
+       popd
+
+       docker pull $IRONIC_BAREMETAL_SOCAT_IMAGE
+       docker save --output $CONTAINER_IMAGES_DIR/socat.tar $IRONIC_BAREMETAL_SOCAT_IMAGE
+}
+
+function build_ironic_images() {
+       for images in ironic-image ironic-inspector-image; do
+               if [ -d "$BUILD_DIR/$images" ]; then
+                       pushd $BUILD_DIR/$images
+                       podman build -t $images .
+                       popd
+               fi
+       done
+
+       if podman images -q localhost/ironic-inspector-image ; then
+               podman tag localhost/ironic-inspector-image $IRONIC_INSPECTOR_IMAGE
+               podman save --output \
+                       $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar \
+                       $IRONIC_INSPECTOR_IMAGE
+       fi
+
+       if podman images -q localhost/ironic-image ; then
+               podman tag localhost/ironic-image $IRONIC_IMAGE
+               podman save --output $CONTAINER_IMAGES_DIR/ironic-image.tar \
+                       $IRONIC_IMAGE
+       fi
+       
+       podman pull k8s.gcr.io/pause:3.1
+       podman save --output $CONTAINER_IMAGES_DIR/podman-pause.tar \
+               k8s.gcr.io/pause:3.1
+
+       #build_baremetal_operator_images
+}
+
+
+function download_container_images() {
+       check_docker
+       pushd $CONTAINER_IMAGES_DIR
+       #docker images for Kubernetes
+       for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+               docker pull k8s.gcr.io/$images:v1.15.0; 
+               docker save --output $images.tar k8s.gcr.io/$images;
+       done
+
+       docker pull k8s.gcr.io/pause:3.1
+       docker save --output pause.tar k8s.gcr.io/pause
+
+       docker pull k8s.gcr.io/etcd:3.3.10
+       docker save --output etcd.tar k8s.gcr.io/etcd
+
+       docker pull k8s.gcr.io/coredns:1.3.1
+       docker save --output coredns.tar k8s.gcr.io/coredns
+
+       #podman images for Ironic
+       check_podman
+       build_ironic_images
+       #podman pull $IRONIC_IMAGE 
+       #podman save --output ironic.tar $IRONIC_IMAGE
+       #podman pull $IRONIC_INSPECTOR_IMAGE 
+       #podman save --output ironic-inspector.tar $IRONIC_INSPECTOR_IMAGE
+       popd
+}
+
+function download_build_packages() {
+       check_curl
+       pushd $BUILD_DIR
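+       # Fetch the ironic-python-agent ramdisk, the baremetal OS image, the podman CNI conflist and the metal3 image sources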
+       if [ ! -f ironic-python-agent.initramfs ]; then
+               curl --insecure --compressed \
+                       -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+       fi
+
+       if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+               curl -o ${BM_IMAGE} --insecure --compressed -L ${BM_IMAGE_URL}
+               md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+       fi
+
+       if [ ! -f 87-podman-bridge.conflist ]; then
+               curl --insecure --compressed -O -L $PODMAN_CNI_CONFLIST
+       fi
+
+       if [ ! -d baremetal-operator ]; then
+               git clone https://github.com/metal3-io/baremetal-operator.git
+               pushd ./baremetal-operator
+               git checkout -b icn_baremetal_operator 11ea02ab5cab8b3ab14972ae7c0e70206bba00b5
+               popd
+       fi
+
+       if [ ! -d ironic-inspector-image ]; then
+               git clone https://github.com/metal3-io/ironic-inspector-image.git
+               pushd ./ironic-inspector-image
+               git checkout -b icn_ironic_inspector_image 25431bd5b7fc87c6f3cfb8b0431fe66b86bbab0e
+               popd
+       fi
+
+       if [ ! -d ironic-image ]; then
+               git clone https://github.com/metal3-io/ironic-image.git
+               pushd ./ironic-image
+               git checkout -b icn_ironic_image 329eb4542f0d8d0f0e9cf0d7e550e33b07efe7fb
+               popd
+       fi
+}
+
+function check_pip() {
+       if ! which pip ; then
+               apt-get install python-pip -y
+       fi
+}
+
+function check_curl() {
+       if ! which curl ; then
+        apt-get install curl -y
+    fi
+}
+
+function check_apt_tools() {
+       if ! which add-apt-repository ; then
+               apt-get install software-properties-common -y
+       fi
+}
+
+function download_ironic_packages() {
+       for package in jq nodejs python-ironicclient \
+                       python-ironic-inspector-client python-lxml python-netaddr \
+                       python-openstackclient unzip genisoimage; do
+       apt-get -d install $package -y
+       done
+       
+       check_pip    
+    pip download lolcat yq -d $PIP_CACHE_DIR
+}
+
+function check_docker() {
+       if which docker ; then
+               return
+       fi
+
+    apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+        docker-ce
+    apt-get update
+    for package in apt-transport-https ca-certificates gnupg-agent \
+            software-properties-common; do
+        apt-get -d install $package -y
+    done
+
+    check_curl
+    check_apt_tools
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get install docker-ce=18.06.0~ce~3-0~ubuntu -y
+}
+
+function check_podman() {
+       if which podman; then
+               return
+       fi
+
+    add-apt-repository -y ppa:projectatomic/ppa
+       apt-get update
+    apt-get install podman -y
+}
+
+function download_docker_packages() {
+    apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+               docker-ce
+    apt-get update
+       for package in apt-transport-https ca-certificates gnupg-agent \
+                       software-properties-common; do
+       apt-get -d install $package -y
+       done
+
+       check_curl
+       check_apt_tools
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get -d install docker-ce=18.06.0~ce~3-0~ubuntu -y
+}
+
+function download_podman_packages() {
+    apt-get update
+    add-apt-repository -y ppa:projectatomic/ppa
+    apt-get -d install podman -y
+}
+
+function download_kubernetes_packages() {
+   curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+   bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+   apt-get update
+   apt-get install -d kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 -y
+}
+
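+# /var/cache/apt/archives normally holds only "lock" and the "partial" directory; any extra entries are cached .deb packages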
+function clean_apt_cache() {
+       pushd /var/cache/apt/archives
+
+       if [ $(ls -1q . | wc -l ) -ge 3 ]; then
+               rm !("lock"|"partial")
+       fi
+       popd
+       
+}
+
+function mv_apt_cache() {
+    pushd /var/cache/apt/archives
+
+    if [ $(ls -1q . | wc -l ) -gt 2 ]; then
+        mv !("lock"|"partial") $LOCAL_APT_REPO
+    fi
+    popd
+}
+
+function check_dir() {
+    if [ ! -d $1 ]; then
+        mkdir -p $1
+    fi
+}
+
+function clean_dir() {
+    pushd $1
+
+    if [ $(ls -1q . | wc -l ) -ne 0 ]; then
+        rm -r ./*
+    fi
+    popd
+}
+
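+# Reset the local caches, then download apt packages, pip packages, container images and build artifacts for offline installation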
+clean_apt_cache
+check_dir $LOCAL_APT_REPO 
+clean_dir $LOCAL_APT_REPO 
+check_dir $PIP_CACHE_DIR
+clean_dir $PIP_CACHE_DIR
+check_dir $BUILD_DIR
+clean_dir $BUILD_DIR
+check_dir $CONTAINER_IMAGES_DIR
+clean_dir $CONTAINER_IMAGES_DIR
+download_essential_packages
+download_ironic_packages
+download_docker_packages
+download_podman_packages
+download_kubernetes_packages
+download_build_packages
+download_container_images
+mv_apt_cache
diff --git a/env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh b/env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh
new file mode 100755 (executable)
index 0000000..4154b6f
--- /dev/null
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+set -ex
+
+source $(dirname $PWD)/../lib/common.sh
+source $(dirname $PWD)/../lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+if [[ $(lsb_release -d | cut -f2) != $UBUNTU_BIONIC ]]; then
+    echo "Currently only Ubuntu 18.04.2 LTS is supported"
+    exit 1
+fi
+
+function clean_essential_packages() {
+    apt-get update
+       for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+                       python-pip python-requests python-setuptools vim wget; do
+       apt-get remove $package -y
+       done
+
+       apt-get autoremove -y
+       rm -rf /etc/apt/sources.list.d/*
+}
+
+function check_prerequisite() {
+    if !(which pip); then
+        apt-get install python-pip -y
+    fi
+
+    if !(which curl); then
+        apt-get install curl -y
+    fi
+
+    if !(which add-apt-repository); then
+        apt-get install software-properties-common -y
+    fi
+}
+
+function clean_ironic_packages() {
+       for package in jq nodejs python-ironicclient \
+                       python-ironic-inspector-client python-lxml python-netaddr \
+                       python-openstackclient unzip genisoimage; do
+       apt-get remove $package -y
+       done    
+}
+
+function clean_docker_packages() {
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get remove docker-ce -y
+       for package in apt-transport-https ca-certificates gnupg-agent \
+            software-properties-common; do
+        apt-get remove $package -y
+    done
+
+       apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+        docker-ce
+
+       apt-get update
+}
+
+function clean_podman_packages() {
+    apt-get update
+    add-apt-repository -y ppa:projectatomic/ppa
+    apt-get remove podman -y
+}
+
+function clean_kubernetes_packages() {
+       #Make sure any Kubernetes packages pulled in during the download step are removed
+   curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+   bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+   apt-get update
+   apt-get remove kubelet kubeadm kubectl -y
+}
+
+function clean_apt_cache() {
+       shopt -s extglob
+       pushd /var/cache/apt/archives
+
+       if [ $(ls -1q . | wc -l ) -ge 3 ]; then
+               rm !("lock"|"partial")
+       fi
+       popd
+       
+}
+
+function mv_apt_cache() {
+       shopt -s extglob
+    pushd /var/cache/apt/archives
+
+    if [ $(ls -1q . | wc -l ) -gt 2 ]; then
+        mv !("lock"|"partial") $LOCAL_APT_REPO
+    fi
+    popd
+}
+
+function check_dir() {
+    if [ ! -d $1 ]; then
+        mkdir -p $1
+    fi
+}
+
+function clean_dir() {
+       shopt -s extglob
+    pushd $1
+
+    if [ $(ls -1q . | wc -l ) -ne 0 ]; then
+        rm -r ./*
+    fi
+    popd
+}
+
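+# Remove the packages installed for the bootloader environment and clean up the offline caches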
+check_prerequisite
+clean_apt_cache
+check_dir $LOCAL_APT_REPO
+clean_dir $LOCAL_APT_REPO
+check_dir $PIP_CACHE_DIR
+clean_dir $PIP_CACHE_DIR
+check_dir $BUILD_DIR
+clean_dir $BUILD_DIR
+check_dir $CONTAINER_IMAGES_DIR
+clean_dir $CONTAINER_IMAGES_DIR
+clean_kubernetes_packages
+clean_podman_packages
+clean_docker_packages
+clean_ironic_packages
+clean_essential_packages
+rm -rf $LOCAL_APT_REPO
+rm -rf $PIP_CACHE_DIR
+rm -rf $BUILD_DIR
+rm -rf $CONTAINER_IMAGES_DIR