Merge "Add SRIOV and QAT device plugin deploy components" into dev/icn-v0.1.0
author Huifeng Le <huifeng.le@intel.com>
Mon, 19 Aug 2019 03:19:42 +0000 (03:19 +0000)
committer Gerrit Code Review <gerrit@akraino.org>
Mon, 19 Aug 2019 03:19:42 +0000 (03:19 +0000)
74 files changed:
Makefile
cmd/bpa-controller/README.md [deleted file]
cmd/bpa-operator/.gitignore [new file with mode: 0644]
cmd/bpa-operator/bpa_operator_launch.sh [new file with mode: 0755]
cmd/bpa-operator/build/Dockerfile [new file with mode: 0644]
cmd/bpa-operator/build/bin/entrypoint [new file with mode: 0755]
cmd/bpa-operator/build/bin/user_setup [new file with mode: 0755]
cmd/bpa-operator/cmd/manager/main.go [new file with mode: 0644]
cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr_2.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/operator.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/role.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/role_binding.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/service_account.yaml [new file with mode: 0644]
cmd/bpa-operator/go.mod [new file with mode: 0644]
cmd/bpa-operator/go.sum [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/apis.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/group.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go [new file with mode: 0644]
cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go [new file with mode: 0644]
cmd/bpa-operator/pkg/controller/add_provisioning.go [new file with mode: 0644]
cmd/bpa-operator/pkg/controller/controller.go [new file with mode: 0644]
cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go [new file with mode: 0644]
cmd/bpa-operator/tools.go [new file with mode: 0644]
cmd/bpa-operator/version/version.go [new file with mode: 0644]
cmd/bpa-restapi-agent/Makefile [new file with mode: 0644]
cmd/bpa-restapi-agent/README.md
cmd/bpa-restapi-agent/api/api.go [new file with mode: 0644]
cmd/bpa-restapi-agent/api/imagehandler.go [new file with mode: 0644]
cmd/bpa-restapi-agent/docs/swagger.yaml [new file with mode: 0644]
cmd/bpa-restapi-agent/go.mod [new file with mode: 0644]
cmd/bpa-restapi-agent/go.sum [new file with mode: 0644]
cmd/bpa-restapi-agent/internal/app/image.go [new file with mode: 0644]
cmd/bpa-restapi-agent/internal/config/config.go [new file with mode: 0644]
cmd/bpa-restapi-agent/internal/db/mongo.go [new file with mode: 0644]
cmd/bpa-restapi-agent/internal/db/store.go [new file with mode: 0644]
cmd/bpa-restapi-agent/internal/utils.go [new file with mode: 0644]
cmd/bpa-restapi-agent/main.go [new file with mode: 0644]
cmd/bpa-restapi-agent/sample.json [new file with mode: 0644]
deploy/kud-plugin-addons/rook/README.md
deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/install.sh [new file with mode: 0755]
deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-common.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml [new file with mode: 0644]
deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml [new file with mode: 0644]
deploy/kud/kud_launch.sh [new file with mode: 0755]
deploy/metal3/scripts/metal3.sh [new file with mode: 0755]
env/01_install_package.sh [deleted file]
env/02_configure.sh [deleted file]
env/03_launch_prereq.sh [deleted file]
env/lib/common.sh [changed mode: 0644->0755]
env/lib/logging.sh [changed mode: 0644->0755]
env/metal3/01_install_package.sh [new file with mode: 0755]
env/metal3/02_configure.sh [new file with mode: 0755]
env/metal3/03_launch_prereq.sh [new file with mode: 0755]
env/ubuntu/bootloader-env/01_bootloader_package_req.sh [new file with mode: 0755]
env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh [new file with mode: 0755]

diff --git a/Makefile b/Makefile
index e69de29..977aac9 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -0,0 +1,25 @@
+SHELL:=/bin/bash
+BMDIR:=$(CURDIR)/env/metal3
+METAL3DIR:=$(CURDIR)/deploy/metal3/scripts
+BPA_OPERATOR:=$(CURDIR)/cmd/bpa-operator/
+KUD_PATH:=$(CURDIR)/deploy/kud
+
+all: bm_install
+
+bm_preinstall:
+       pushd $(BMDIR) && ./01_install_package.sh && ./02_configure.sh && ./03_launch_prereq.sh && popd
+
+bm_install:
+       pushd $(METAL3DIR) && ./metal3.sh && popd 
+
+bm_all: bm_preinstall bm_install
+
+kud_download:
+       pushd $(KUD_PATH) && ./kud_launch.sh && popd
+
+bpa_op_install: kud_download
+       pushd $(BPA_OPERATOR) && ./bpa_operator_launch.sh && popd
+
+bpa_op_all: bm_all bpa_op_install      
+
+.PHONY: all bm_preinstall bm_install
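
The new top-level Makefile wires the provisioning flow together: bm_preinstall runs the env/metal3 package/configure/prerequisite scripts, bm_install runs deploy/metal3/scripts/metal3.sh, kud_download fetches KUD via deploy/kud/kud_launch.sh, and bpa_op_install launches the BPA operator. A quick usage sketch, assuming the commands are run from the repository root on a host already prepared for the metal3 scripts:

    make bm_preinstall   # env/metal3/01_install_package.sh, 02_configure.sh, 03_launch_prereq.sh
    make bm_all          # bm_preinstall + bm_install (deploy/metal3/scripts/metal3.sh)
    make bpa_op_all      # bm_all, then kud_download and bpa_op_install
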
diff --git a/cmd/bpa-controller/README.md b/cmd/bpa-controller/README.md
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/cmd/bpa-operator/.gitignore b/cmd/bpa-operator/.gitignore
new file mode 100644 (file)
index 0000000..7c50470
--- /dev/null
@@ -0,0 +1,77 @@
+# Temporary Build Files
+build/_output
+build/_test
+# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
+### Emacs ###
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+# Org-mode
+.org-id-locations
+*_archive
+# flymake-mode
+*_flymake.*
+# eshell files
+/eshell/history
+/eshell/lastdir
+# elpa packages
+/elpa/
+# reftex files
+*.rel
+# AUCTeX auto folder
+/auto/
+# cask packages
+.cask/
+dist/
+# Flycheck
+flycheck_*.el
+# server auth directory
+/server/
+# projectiles files
+.projectile
+projectile-bookmarks.eld
+# directory configuration
+.dir-locals.el
+# saveplace
+places
+# url cache
+url/cache/
+# cedet
+ede-projects.el
+# smex
+smex-items
+# company-statistics
+company-statistics-cache.el
+# anaconda-mode
+anaconda-mode/
+### Go ###
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+# Test binary, build with 'go test -c'
+*.test
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+### Vim ###
+# swap
+.sw[a-p]
+.*.sw[a-p]
+# session
+Session.vim
+# temporary
+.netrwhist
+# auto-generated tag files
+tags
+### VisualStudioCode ###
+.vscode/*
+.history
+# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
diff --git a/cmd/bpa-operator/bpa_operator_launch.sh b/cmd/bpa-operator/bpa_operator_launch.sh
new file mode 100755 (executable)
index 0000000..7152fc2
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+#Get Go ENV variables
+eval "$(go env)"
+
+export GO111MODULE=on
+go get -d github.com/operator-framework/operator-sdk # This will download the git repository and not install it
+pushd $GOPATH/src/github.com/operator-framework/operator-sdk
+git checkout master
+make tidy
+make install
+popd
+
+#Create the provisioning CRD, then copy the bpa operator directory to the right path
+kubectl create -f $PWD/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml 
+echo $GOPATH
+mkdir -p $GOPATH/src/github.com/ && cp -r $PWD $GOPATH/src/github.com/bpa-operator
+pushd $GOPATH/src/github.com/bpa-operator
+operator-sdk up local --kubeconfig $HOME/.kube/config
+popd
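
bpa_operator_launch.sh builds operator-sdk from source, registers the Provisioning CRD, copies the operator tree under $GOPATH, and then runs the operator out of cluster with `operator-sdk up local`. Once it is running, a minimal smoke test might look like this (a sketch run from the cmd/bpa-operator directory, assuming kubectl points at the same cluster as the kubeconfig above):

    kubectl get crd provisionings.bpa.akraino.org                      # CRD created by the script
    kubectl create -f deploy/crds/bpa_v1alpha1_provisioning_cr.yaml    # sample custom resource
    kubectl get provisionings                                          # short name also works: kubectl get bpa
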
diff --git a/cmd/bpa-operator/build/Dockerfile b/cmd/bpa-operator/build/Dockerfile
new file mode 100644 (file)
index 0000000..486db93
--- /dev/null
@@ -0,0 +1,15 @@
+FROM registry.access.redhat.com/ubi7/ubi-minimal:latest
+
+ENV OPERATOR=/usr/local/bin/bpa-operator \
+    USER_UID=1001 \
+    USER_NAME=bpa-operator
+
+# install operator binary
+COPY build/_output/bin/bpa-operator ${OPERATOR}
+
+COPY build/bin /usr/local/bin
+RUN  /usr/local/bin/user_setup
+
+ENTRYPOINT ["/usr/local/bin/entrypoint"]
+
+USER ${USER_UID}
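
The Dockerfile copies the operator binary from build/_output/bin/bpa-operator, which is where `operator-sdk build` places it. A hedged build-and-publish sketch from the cmd/bpa-operator directory (the image reference is illustrative, not part of this change; deploy/operator.yaml further down expects it in place of REPLACE_IMAGE):

    operator-sdk build <registry>/bpa-operator:v0.1.0
    docker push <registry>/bpa-operator:v0.1.0
    sed -i 's|REPLACE_IMAGE|<registry>/bpa-operator:v0.1.0|g' deploy/operator.yaml
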
diff --git a/cmd/bpa-operator/build/bin/entrypoint b/cmd/bpa-operator/build/bin/entrypoint
new file mode 100755 (executable)
index 0000000..4044594
--- /dev/null
@@ -0,0 +1,12 @@
+#!/bin/sh -e
+
+# This is documented here:
+# https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines
+
+if ! whoami &>/dev/null; then
+  if [ -w /etc/passwd ]; then
+    echo "${USER_NAME:-bpa-operator}:x:$(id -u):$(id -g):${USER_NAME:-bpa-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd
+  fi
+fi
+
+exec ${OPERATOR} $@
diff --git a/cmd/bpa-operator/build/bin/user_setup b/cmd/bpa-operator/build/bin/user_setup
new file mode 100755 (executable)
index 0000000..1e36064
--- /dev/null
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -x
+
+# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be)
+mkdir -p ${HOME}
+chown ${USER_UID}:0 ${HOME}
+chmod ug+rwx ${HOME}
+
+# runtime user will need to be able to self-insert in /etc/passwd
+chmod g+rw /etc/passwd
+
+# no need for this script to remain in the image after running
+rm $0
diff --git a/cmd/bpa-operator/cmd/manager/main.go b/cmd/bpa-operator/cmd/manager/main.go
new file mode 100644 (file)
index 0000000..406803c
--- /dev/null
@@ -0,0 +1,162 @@
+package main
+
+import (
+       "context"
+       "flag"
+       "fmt"
+       "os"
+       "runtime"
+
+       // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
+       _ "k8s.io/client-go/plugin/pkg/client/auth"
+       "k8s.io/client-go/rest"
+
+       "github.com/bpa-operator/pkg/apis"
+       "github.com/bpa-operator/pkg/controller"
+
+       "github.com/operator-framework/operator-sdk/pkg/k8sutil"
+       kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
+       "github.com/operator-framework/operator-sdk/pkg/leader"
+       "github.com/operator-framework/operator-sdk/pkg/log/zap"
+       "github.com/operator-framework/operator-sdk/pkg/metrics"
+       "github.com/operator-framework/operator-sdk/pkg/restmapper"
+       sdkVersion "github.com/operator-framework/operator-sdk/version"
+       "github.com/spf13/pflag"
+       v1 "k8s.io/api/core/v1"
+       "k8s.io/apimachinery/pkg/util/intstr"
+       "sigs.k8s.io/controller-runtime/pkg/client/config"
+       "sigs.k8s.io/controller-runtime/pkg/manager"
+       logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+       "sigs.k8s.io/controller-runtime/pkg/runtime/signals"
+)
+
+// Change below variables to serve metrics on different host or port.
+var (
+       metricsHost               = "0.0.0.0"
+       metricsPort         int32 = 8383
+       operatorMetricsPort int32 = 8686
+)
+var log = logf.Log.WithName("cmd")
+
+func printVersion() {
+       log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
+       log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
+       log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
+}
+
+func main() {
+       // Add the zap logger flag set to the CLI. The flag set must
+       // be added before calling pflag.Parse().
+       pflag.CommandLine.AddFlagSet(zap.FlagSet())
+
+       // Add flags registered by imported packages (e.g. glog and
+       // controller-runtime)
+       pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+
+       pflag.Parse()
+
+       // Use a zap logr.Logger implementation. If none of the zap
+       // flags are configured (or if the zap flag set is not being
+       // used), this defaults to a production zap logger.
+       //
+       // The logger instantiated here can be changed to any logger
+       // implementing the logr.Logger interface. This logger will
+       // be propagated through the whole operator, generating
+       // uniform and structured logs.
+       logf.SetLogger(zap.Logger())
+
+       printVersion()
+
+       namespace, err := k8sutil.GetWatchNamespace()
+       if err != nil {
+               log.Error(err, "Failed to get watch namespace")
+               os.Exit(1)
+       }
+
+       // Get a config to talk to the apiserver
+       cfg, err := config.GetConfig()
+       if err != nil {
+               log.Error(err, "")
+               os.Exit(1)
+       }
+
+       ctx := context.TODO()
+       // Become the leader before proceeding
+       err = leader.Become(ctx, "bpa-operator-lock")
+       if err != nil {
+               log.Error(err, "")
+               os.Exit(1)
+       }
+
+       // Create a new Cmd to provide shared dependencies and start components
+       mgr, err := manager.New(cfg, manager.Options{
+               Namespace:          namespace,
+               MapperProvider:     restmapper.NewDynamicRESTMapper,
+               MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
+       })
+       if err != nil {
+               log.Error(err, "")
+               os.Exit(1)
+       }
+
+       log.Info("Registering Components.")
+
+       // Setup Scheme for all resources
+       if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
+               log.Error(err, "")
+               os.Exit(1)
+       }
+
+       // Setup all Controllers
+       if err := controller.AddToManager(mgr); err != nil {
+               log.Error(err, "")
+               os.Exit(1)
+       }
+
+       if err = serveCRMetrics(cfg); err != nil {
+               log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
+       }
+
+       // Add to the below struct any other metrics ports you want to expose.
+       servicePorts := []v1.ServicePort{
+               {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
+               {Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
+       }
+       // Create Service object to expose the metrics port(s).
+       _, err = metrics.CreateMetricsService(ctx, cfg, servicePorts)
+       if err != nil {
+               log.Info(err.Error())
+       }
+
+       log.Info("Starting the Cmd.")
+
+       // Start the Cmd
+       if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
+               log.Error(err, "Manager exited non-zero")
+               os.Exit(1)
+       }
+}
+
+// serveCRMetrics gets the Operator/CustomResource GVKs and generates metrics based on those types.
+// It serves those metrics on "http://metricsHost:operatorMetricsPort".
+func serveCRMetrics(cfg *rest.Config) error {
+       // Below function returns filtered operator/CustomResource specific GVKs.
+       // For more control override the below GVK list with your own custom logic.
+       filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
+       if err != nil {
+               return err
+       }
+       // Get the namespace the operator is currently deployed in.
+       operatorNs, err := k8sutil.GetOperatorNamespace()
+       if err != nil {
+               return err
+       }
+       // To generate metrics in other namespaces, add the values below.
+       ns := []string{operatorNs}
+       // Generate and serve custom resource specific metrics.
+       err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort)
+       if err != nil {
+               return err
+       }
+       return nil
+}
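
The scaffolded main.go exits unless it can resolve a watch namespace: deploy/operator.yaml injects WATCH_NAMESPACE through the downward API, while `operator-sdk up local` (as used by bpa_operator_launch.sh) sets it from its --namespace flag, which defaults to "default". A minimal out-of-cluster run without the launch script, assuming the CRD is already registered:

    export OPERATOR_NAME=bpa-operator
    operator-sdk up local --namespace=default --kubeconfig $HOME/.kube/config

When run this way, serveCRMetrics and the metrics Service creation can fail because there is no in-cluster pod or namespace file, but per the code above those failures are only logged at info level and do not stop the operator.
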
diff --git a/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr.yaml b/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr.yaml
new file mode 100644 (file)
index 0000000..7dd9f34
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: provisioning-sample-2
+  labels:
+    cluster: cluster-xyz
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: 00:c6:14:04:61:b2
+    - master-2:
+        mac-address: 00:c5:12:06:61:b2
+  workers:
+    - worker-1:
+         mac-address: 00:c4:13:04:62:b5
+  hostfile: /root/go/src/test-code/hosts.ini
diff --git a/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr_2.yaml b/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_cr_2.yaml
new file mode 100644 (file)
index 0000000..5b485a3
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: provisioning-sample-two
+  labels:
+    cluster: cluster-abc
+    owner: c1
+spec:
+  masters:
+    - master-1:
+            mac-address: a4:bf:0f:63:85:66
+  workers:
+    - worker-1:
+            mac-address: b5:bf:0f:63:85:61
+    - worker-2:
+            mac-address: 34:f2:fd:9c:87:62
+  hostfile: /root/go/src/test-code/testhost.ini 
diff --git a/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml b/cmd/bpa-operator/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml
new file mode 100644 (file)
index 0000000..778b194
--- /dev/null
@@ -0,0 +1,40 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: provisionings.bpa.akraino.org
+spec:
+  group: bpa.akraino.org
+  names:
+    kind: Provisioning
+    listKind: ProvisioningList
+    plural: provisionings
+    singular: provisioning
+    shortNames:
+    - bpa
+  scope: Namespaced
+  subresources:
+    status: {}
+  validation:
+    openAPIV3Schema:
+      properties:
+        apiVersion:
+          description: 'APIVersion defines the versioned schema of this representation
+            of an object. Servers should convert recognized schemas to the latest
+            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
+          type: string
+        kind:
+          description: 'Kind is a string value representing the REST resource this
+            object represents. Servers may infer this from the endpoint the client
+            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
+          type: string
+        metadata:
+          type: object
+        spec:
+          type: object
+        status:
+          type: object
+  version: v1alpha1
+  versions:
+  - name: v1alpha1
+    served: true
+    storage: true
diff --git a/cmd/bpa-operator/deploy/operator.yaml b/cmd/bpa-operator/deploy/operator.yaml
new file mode 100644 (file)
index 0000000..913c062
--- /dev/null
@@ -0,0 +1,33 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: bpa-operator
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: bpa-operator
+  template:
+    metadata:
+      labels:
+        name: bpa-operator
+    spec:
+      serviceAccountName: bpa-operator
+      containers:
+        - name: bpa-operator
+          # Replace this with the built image name
+          image: REPLACE_IMAGE
+          command:
+          - bpa-operator
+          imagePullPolicy: Always
+          env:
+            - name: WATCH_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: OPERATOR_NAME
+              value: "bpa-operator"
diff --git a/cmd/bpa-operator/deploy/role.yaml b/cmd/bpa-operator/deploy/role.yaml
new file mode 100644 (file)
index 0000000..fbe90b2
--- /dev/null
@@ -0,0 +1,60 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  creationTimestamp: null
+  name: bpa-operator
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  - endpoints
+  - persistentvolumeclaims
+  - events
+  - configmaps
+  - secrets
+  verbs:
+  - '*'
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  - replicasets
+  - statefulsets
+  verbs:
+  - '*'
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - servicemonitors
+  verbs:
+  - get
+  - create
+- apiGroups:
+  - apps
+  resourceNames:
+  - bpa-operator
+  resources:
+  - deployments/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - get
+- apiGroups:
+  - apps
+  resources:
+  - replicasets
+  verbs:
+  - get
+- apiGroups:
+  - bpa.akraino.org
+  resources:
+  - '*'
+  verbs:
+  - '*'
diff --git a/cmd/bpa-operator/deploy/role_binding.yaml b/cmd/bpa-operator/deploy/role_binding.yaml
new file mode 100644 (file)
index 0000000..fab2338
--- /dev/null
@@ -0,0 +1,11 @@
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: bpa-operator
+subjects:
+- kind: ServiceAccount
+  name: bpa-operator
+roleRef:
+  kind: Role
+  name: bpa-operator
+  apiGroup: rbac.authorization.k8s.io
diff --git a/cmd/bpa-operator/deploy/service_account.yaml b/cmd/bpa-operator/deploy/service_account.yaml
new file mode 100644 (file)
index 0000000..6157f10
--- /dev/null
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: bpa-operator
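
Together with the Provisioning CRD, these manifests (service account, role, role binding, and deployment) are what is needed to run the operator in-cluster instead of via `operator-sdk up local`. A deployment sketch from the cmd/bpa-operator directory, assuming REPLACE_IMAGE in deploy/operator.yaml has already been substituted with a pushed image:

    kubectl create -f deploy/service_account.yaml
    kubectl create -f deploy/role.yaml
    kubectl create -f deploy/role_binding.yaml
    kubectl create -f deploy/crds/bpa_v1alpha1_provisioning_crd.yaml
    kubectl create -f deploy/operator.yaml

The RBAC and Deployment objects are namespaced, so they should be created in the namespace the operator is meant to watch; the CRD itself is cluster-scoped.
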
diff --git a/cmd/bpa-operator/go.mod b/cmd/bpa-operator/go.mod
new file mode 100644 (file)
index 0000000..086123d
--- /dev/null
@@ -0,0 +1,31 @@
+module github.com/bpa-operator
+
+require (
+       github.com/NYTimes/gziphandler v1.0.1 // indirect
+       github.com/operator-framework/operator-sdk v0.10.0
+       github.com/spf13/pflag v1.0.3
+       gopkg.in/ini.v1 v1.46.0
+       k8s.io/api v0.0.0-20190612125737-db0771252981
+       k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad
+       k8s.io/client-go v11.0.0+incompatible
+       k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 // indirect
+       sigs.k8s.io/controller-runtime v0.1.12
+       sigs.k8s.io/controller-tools v0.1.10
+)
+
+// Pinned to kubernetes-1.13.4
+replace (
+       k8s.io/api => k8s.io/api v0.0.0-20190222213804-5cb15d344471
+       k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236
+       k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628
+       k8s.io/client-go => k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4
+)
+
+replace (
+       github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.29.0
+       k8s.io/kube-state-metrics => k8s.io/kube-state-metrics v1.6.0
+       sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.1.12
+       sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.1.11-0.20190411181648-9d55346c2bde
+)
+
+replace github.com/operator-framework/operator-sdk => github.com/operator-framework/operator-sdk v0.9.0
diff --git a/cmd/bpa-operator/go.sum b/cmd/bpa-operator/go.sum
new file mode 100644 (file)
index 0000000..88bcc02
--- /dev/null
@@ -0,0 +1,537 @@
+bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg=
+cloud.google.com/go v0.0.0-20160913182117-3b1ae45394a2/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0=
+cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA=
+contrib.go.opencensus.io/exporter/ocagent v0.4.9/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw=
+contrib.go.opencensus.io/exporter/ocagent v0.4.11 h1:Zwy9skaqR2igcEfSVYDuAsbpa33N0RPtnYTHEe2whPI=
+contrib.go.opencensus.io/exporter/ocagent v0.4.11/go.mod h1:7ihiYRbdcVfW4m4wlXi9WRPdv79C0fStcjNlyE6ek9s=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v11.1.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v11.5.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v11.7.0+incompatible h1:gzma19dc9ejB75D90E5S+/wXouzpZyA+CV+/MJPSD/k=
+github.com/Azure/go-autorest v11.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/sprig v0.0.0-20190301161902-9f8fceff796f/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30 h1:Kn3rqvbUFqSepE2OqVu0Pn1CbDw9IuMlONapol0zuwk=
+github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
+github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.9+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/prometheus-operator v0.29.0 h1:Moi4klbr1xUVaofWzlaM12mxwCL294GiLW2Qj8ku0sY=
+github.com/coreos/prometheus-operator v0.29.0/go.mod h1:SO+r5yZUacDFPKHfPoUjI3hMsH+ZUdiuNNhuSq3WoSg=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/distribution v2.6.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v0.0.0-20180612054059-a9fbbdc8dd87/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
+github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.8.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.8.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.3+incompatible h1:2OwhVdhtzYUp5P5wuGsVDPagKSRd9JK72sJCHVCXh5g=
+github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=
+github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE=
+github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8=
+github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk=
+github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.0 h1:A4SZ6IWh3lnjH0rG0Z5lkxazMGBECtrZcbyYQi+64k4=
+github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.0 h1:Kg7Wl7LkTPlmc393QZQ/5rQadPhi7pBVEMZxyTi0Ii8=
+github.com/go-openapi/swag v0.19.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+github.com/gogo/protobuf v0.0.0-20170330071051-c0656edd0d9e/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
+github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gophercloud/gophercloud v0.0.0-20180330165814-781450b3c4fc/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
+github.com/gophercloud/gophercloud v0.0.0-20190318015731-ff9851476e98/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gophercloud/gophercloud v0.0.0-20190408160324-6c7ac67f8855 h1:3dfUujjROkkXcwIpsh9z6bjOhPFooLpxejc7qgX13/g=
+github.com/gophercloud/gophercloud v0.0.0-20190408160324-6c7ac67f8855/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM=
+github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.6.3/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-health-probe v0.2.0/go.mod h1:4GVx/bTCtZaSzhjbGueDY5YgBdsmKeVx+LErv/n0L6s=
+github.com/hashicorp/golang-lru v0.0.0-20160207214719-a0d98a5f2880/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
+github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983 h1:wL11wNW7dhKIcRCHSm4sHKPWz0tt4mwBsVodG7+Xyqg=
+github.com/mailru/easyjson v0.0.0-20190403194419-1ea4449da983/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/markbates/inflect v1.0.4/go.mod h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs=
+github.com/martinlindhe/base36 v0.0.0-20180729042928-5cda0030da17/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8=
+github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687/go.mod h1:aoVsckWnsNzazwF2kmD+bzgdr4GBlbK91zsdivQJ2eU=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.2-0.20180831124310-ae19f1b56d53/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/openshift/origin v0.0.0-20160503220234-8f127d736703/go.mod h1:0Rox5r9C8aQn6j1oAOQ0c1uC86mYbUFObzjBRvUKHII=
+github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/operator-framework/operator-lifecycle-manager v0.0.0-20181023032605-e838f7fb2186/go.mod h1:Ma5ZXd4S1vmMyewWlF7aO8CZiokR7Sd8dhSfkGkNU4U=
+github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190105193533-81104ffdc4fb/go.mod h1:XMyE4n2opUK4N6L45YGQkXXi8F9fD7XDYFv/CsS6V5I=
+github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190128024246-5eb7ae5bdb7a/go.mod h1:vq6TTFvg6ti1Bn6ACsZneZTmjTsURgDD6tQtVDbEgsU=
+github.com/operator-framework/operator-registry v1.0.1/go.mod h1:1xEdZjjUg2hPEd52LG3YQ0jtwiwEGdm98S1TH5P4RAA=
+github.com/operator-framework/operator-registry v1.0.4/go.mod h1:hve6YwcjM2nGVlscLtNsp9sIIBkNZo6jlJgzWw7vP9s=
+github.com/operator-framework/operator-sdk v0.9.0 h1:moY3n5vsg4OpD3FzHvqI68Fv+gJFQ1GaDKMHm2NRpF8=
+github.com/operator-framework/operator-sdk v0.9.0/go.mod h1:7eW7ldXmvenehIMVdO2zCdERf/828Mrftq4u7GS0I68=
+github.com/pborman/uuid v0.0.0-20180906182336-adf5a7427709/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
+github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190403104016-ea9eea638872 h1:0aNv3xC7DmQoy1/x1sMh18g+fihWW68LL13i8ao9kl4=
+github.com/prometheus/procfs v0.0.0-20190403104016-ea9eea638872/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/robfig/cron v0.0.0-20170526150127-736158dc09e1/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday v0.0.0-20151117072312-300106c228d5/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/sclevine/spec v1.0.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50/go.mod h1:1pdIZTAHUz+HDKDVZ++5xg/duPlhKAIzw9qy42CWYp4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
+github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/ugorji/go/codec v0.0.0-20181022190402-e5e69e061d4f/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
+go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
+go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
+go.opencensus.io v0.20.0 h1:L/ARO58pdktB6dLmYI0zAyW1XnavEmGziFd0MKfxnck=
+go.opencensus.io v0.20.0/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
+go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
+golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4=
+golang.org/x/crypto v0.0.0-20180222182404-49796115aa4b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5 h1:bselrhR0Or1vomJZC8ZIjWtbDmn9OYFLX5Ik9alpJpE=
+golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20170412232759-a6bd8cefa181/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181105165119-ca4130e427c7/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
+golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181023152157-44b849a8bc13/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011152555-a398e557df60/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181207222222-4c874b978acb/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190213015956-f7e1b50d2251/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190408170212-12dd9f86f350/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
+google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA=
+google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181016170114-94acd270e44e/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
+google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.46.0 h1:VeDZbLYGaupuvIrsYCEOe/L/2Pcs5n7hdO1ZTjporag=
+gopkg.in/ini.v1 v1.46.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE=
+k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
+k8s.io/apiextensions-apiserver v0.0.0-20190228180357-d002e88f6236/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
+k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg=
+k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/apiserver v0.0.0-20181026151315-13cfe3978170/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
+k8s.io/apiserver v0.0.0-20181213151703-3ccfe8365421/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
+k8s.io/cli-runtime v0.0.0-20181213153952-835b10687cb6/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
+k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 h1:aE8wOCKuoRs2aU0OP/Rz8SXiAB0FTTku3VtGhhrkSmc=
+k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/code-generator v0.0.0-20181203235156-f8cba74510f3/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8=
+k8s.io/gengo v0.0.0-20181106084056-51747d6e00da/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20181113154421-fd15ee9cc2f7/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190327210449-e17681d19d3a/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/helm v2.13.1+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
+k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/kube-aggregator v0.0.0-20181204002017-122bac39d429/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU=
+k8s.io/kube-aggregator v0.0.0-20181213152105-1e8cd453c474/go.mod h1:8sbzT4QQKDEmSCIbfqjV0sd97GpUT7A4W626sBiYJmU=
+k8s.io/kube-openapi v0.0.0-20181031203759-72693cb1fadd/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190320154901-5e45bb682580/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208 h1:5sW+fEHvlJI3Ngolx30CmubFulwH28DhKjGf70Xmtco=
+k8s.io/kube-openapi v0.0.0-20190603182131-db7b694dc208/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4=
+k8s.io/kube-state-metrics v1.6.0 h1:6wkxiTPTZ4j+LoMWuT+rZ+OlUWswktzXs5yHmggmAZ0=
+k8s.io/kube-state-metrics v1.6.0/go.mod h1:84+q9aGVQPzXYGgtvyhZr/fSI6BdLsbPWXn37RASc9k=
+k8s.io/kubernetes v1.11.7-beta.0.0.20181219023948-b875d52ea96d/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/kubernetes v1.11.8-beta.0.0.20190124204751-3a10094374f2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
+sigs.k8s.io/controller-runtime v0.1.12 h1:ovDq28E64PeY1yR+6H7DthakIC09soiDCrKvfP2tPYo=
+sigs.k8s.io/controller-runtime v0.1.12/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8=
+sigs.k8s.io/controller-tools v0.1.11-0.20190411181648-9d55346c2bde/go.mod h1:ATWLRP3WGxuAN9HcT2LaKHReXIH+EZGzRuMHuxjXfhQ=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/testing_frameworks v0.1.0/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
+sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go b/cmd/bpa-operator/pkg/apis/addtoscheme_bpa_v1alpha1.go
new file mode 100644 (file)
index 0000000..ef79c5c
--- /dev/null
@@ -0,0 +1,10 @@
+package apis
+
+import (
+       "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
+)
+
+func init() {
+       // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+       AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme)
+}
diff --git a/cmd/bpa-operator/pkg/apis/apis.go b/cmd/bpa-operator/pkg/apis/apis.go
new file mode 100644 (file)
index 0000000..07dc961
--- /dev/null
@@ -0,0 +1,13 @@
+package apis
+
+import (
+       "k8s.io/apimachinery/pkg/runtime"
+)
+
+// AddToSchemes may be used to add all resources defined in the project to a Scheme
+var AddToSchemes runtime.SchemeBuilder
+
+// AddToScheme adds all Resources to the Scheme
+func AddToScheme(s *runtime.Scheme) error {
+       return AddToSchemes.AddToScheme(s)
+}
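
For context only: a hedged sketch of how this aggregated SchemeBuilder is usually consumed at operator startup under the standard operator-sdk layout. The package and helper name below are hypothetical; the actual entrypoint wiring is not reproduced in this hunk.

```go
package example

import (
	"os"

	"github.com/bpa-operator/pkg/apis"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// registerAPIs applies the aggregated SchemeBuilder from pkg/apis to the
// manager's scheme so the bpa v1alpha1 types can be mapped to GroupVersionKinds.
func registerAPIs(mgr manager.Manager) {
	if err := apis.AddToScheme(mgr.GetScheme()); err != nil {
		// The real entrypoint would log the error; exit on failure here.
		os.Exit(1)
	}
}
```
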
diff --git a/cmd/bpa-operator/pkg/apis/bpa/group.go b/cmd/bpa-operator/pkg/apis/bpa/group.go
new file mode 100644 (file)
index 0000000..28de849
--- /dev/null
@@ -0,0 +1,6 @@
+// Package bpa contains bpa API versions.
+//
+// This file ensures Go source parsers acknowledge the bpa package
+// and any child packages. It can be removed if any other Go source files are
+// added to this package.
+package bpa
diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/doc.go
new file mode 100644 (file)
index 0000000..3e94580
--- /dev/null
@@ -0,0 +1,4 @@
+// Package v1alpha1 contains API Schema definitions for the bpa v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=bpa.akraino.org
+package v1alpha1
diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go
new file mode 100644 (file)
index 0000000..e9b02c2
--- /dev/null
@@ -0,0 +1,69 @@
+package v1alpha1
+
+import (
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EDIT THIS FILE!  THIS IS SCAFFOLDING FOR YOU TO OWN!
+// NOTE: json tags are required.  Any new fields you add must have json tags for the fields to be serialized.
+
+// ProvisioningSpec defines the desired state of Provisioning
+// +k8s:openapi-gen=true
+type ProvisioningSpec struct {
+       // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
+       // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
+       // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
+       Masters []map[string]Master  `json:"masters,omitempty"`
+       Workers []map[string]Worker  `json:"workers,omitempty"`
+       HostsFile string `json:"hostfile,omitempty"`
+}
+
+// ProvisioningStatus defines the observed state of Provisioning
+// +k8s:openapi-gen=true
+type ProvisioningStatus struct {
+       // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
+       // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file
+       // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Provisioning is the Schema for the provisionings API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+type Provisioning struct {
+       metav1.TypeMeta   `json:",inline"`
+       metav1.ObjectMeta `json:"metadata,omitempty"`
+
+       Spec   ProvisioningSpec   `json:"spec,omitempty"`
+       Status ProvisioningStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProvisioningList contains a list of Provisioning
+type ProvisioningList struct {
+       metav1.TypeMeta `json:",inline"`
+       metav1.ListMeta `json:"metadata,omitempty"`
+       Items           []Provisioning `json:"items"`
+}
+
+// Master describes a master node: its NIC MAC address and resource requirements
+type Master struct {
+       MACaddress string `json:"mac-address,omitempty"`
+       CPU int32  `json:"cpu,omitempty"`
+       Memory string  `json:"memory,omitempty"`
+}
+
+// Worker describes a worker node: its NIC MAC address, resource requirements, and SRIOV/QAT device needs
+type Worker struct {
+       MACaddress string `json:"mac-address,omitempty"`
+       CPU int32 `json:"cpu,omitempty"`
+       Memory string  `json:"memory,omitempty"`
+       SRIOV bool  `json:"sriov,omitempty"`
+       QAT  bool      `json:"qat,omitempty"`
+}
+
+func init() {
+       SchemeBuilder.Register(&Provisioning{}, &ProvisioningList{})
+}
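
For reference, a minimal sketch of how the json-tagged fields above fit together in code. Every value (node labels, MAC addresses, CPU/memory sizes, hosts-file path) is invented for illustration and is not taken from this change.

```go
package v1alpha1

// exampleSpec shows how Masters, Workers, and HostsFile are populated.
// All values below are hypothetical.
func exampleSpec() ProvisioningSpec {
	return ProvisioningSpec{
		Masters: []map[string]Master{
			{"master-0": {MACaddress: "00:1e:67:00:00:01", CPU: 4, Memory: "8Gi"}},
		},
		Workers: []map[string]Worker{
			{"worker-0": {MACaddress: "00:1e:67:00:00:02", CPU: 8, Memory: "16Gi", SRIOV: true, QAT: true}},
		},
		HostsFile: "/tmp/hosts.ini",
	}
}
```
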
diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/register.go
new file mode 100644 (file)
index 0000000..3dc2439
--- /dev/null
@@ -0,0 +1,19 @@
+// NOTE: Boilerplate only.  Ignore this file.
+
+// Package v1alpha1 contains API Schema definitions for the bpa v1alpha1 API group
+// +k8s:deepcopy-gen=package,register
+// +groupName=bpa.akraino.org
+package v1alpha1
+
+import (
+       "k8s.io/apimachinery/pkg/runtime/schema"
+       "sigs.k8s.io/controller-runtime/pkg/runtime/scheme"
+)
+
+var (
+       // SchemeGroupVersion is group version used to register these objects
+       SchemeGroupVersion = schema.GroupVersion{Group: "bpa.akraino.org", Version: "v1alpha1"}
+
+       // SchemeBuilder is used to add go types to the GroupVersionKind scheme
+       SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+)
diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.deepcopy.go
new file mode 100644 (file)
index 0000000..cf76880
--- /dev/null
@@ -0,0 +1,160 @@
+// +build !ignore_autogenerated
+
+// Code generated by operator-sdk. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+       runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Master) DeepCopyInto(out *Master) {
+       *out = *in
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Master.
+func (in *Master) DeepCopy() *Master {
+       if in == nil {
+               return nil
+       }
+       out := new(Master)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Provisioning) DeepCopyInto(out *Provisioning) {
+       *out = *in
+       out.TypeMeta = in.TypeMeta
+       in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+       in.Spec.DeepCopyInto(&out.Spec)
+       out.Status = in.Status
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Provisioning.
+func (in *Provisioning) DeepCopy() *Provisioning {
+       if in == nil {
+               return nil
+       }
+       out := new(Provisioning)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Provisioning) DeepCopyObject() runtime.Object {
+       if c := in.DeepCopy(); c != nil {
+               return c
+       }
+       return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProvisioningList) DeepCopyInto(out *ProvisioningList) {
+       *out = *in
+       out.TypeMeta = in.TypeMeta
+       out.ListMeta = in.ListMeta
+       if in.Items != nil {
+               in, out := &in.Items, &out.Items
+               *out = make([]Provisioning, len(*in))
+               for i := range *in {
+                       (*in)[i].DeepCopyInto(&(*out)[i])
+               }
+       }
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningList.
+func (in *ProvisioningList) DeepCopy() *ProvisioningList {
+       if in == nil {
+               return nil
+       }
+       out := new(ProvisioningList)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ProvisioningList) DeepCopyObject() runtime.Object {
+       if c := in.DeepCopy(); c != nil {
+               return c
+       }
+       return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProvisioningSpec) DeepCopyInto(out *ProvisioningSpec) {
+       *out = *in
+       if in.Masters != nil {
+               in, out := &in.Masters, &out.Masters
+               *out = make([]map[string]Master, len(*in))
+               for i := range *in {
+                       if (*in)[i] != nil {
+                               in, out := &(*in)[i], &(*out)[i]
+                               *out = make(map[string]Master, len(*in))
+                               for key, val := range *in {
+                                       (*out)[key] = val
+                               }
+                       }
+               }
+       }
+       if in.Workers != nil {
+               in, out := &in.Workers, &out.Workers
+               *out = make([]map[string]Worker, len(*in))
+               for i := range *in {
+                       if (*in)[i] != nil {
+                               in, out := &(*in)[i], &(*out)[i]
+                               *out = make(map[string]Worker, len(*in))
+                               for key, val := range *in {
+                                       (*out)[key] = val
+                               }
+                       }
+               }
+       }
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningSpec.
+func (in *ProvisioningSpec) DeepCopy() *ProvisioningSpec {
+       if in == nil {
+               return nil
+       }
+       out := new(ProvisioningSpec)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProvisioningStatus) DeepCopyInto(out *ProvisioningStatus) {
+       *out = *in
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisioningStatus.
+func (in *ProvisioningStatus) DeepCopy() *ProvisioningStatus {
+       if in == nil {
+               return nil
+       }
+       out := new(ProvisioningStatus)
+       in.DeepCopyInto(out)
+       return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Worker) DeepCopyInto(out *Worker) {
+       *out = *in
+       return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Worker.
+func (in *Worker) DeepCopy() *Worker {
+       if in == nil {
+               return nil
+       }
+       out := new(Worker)
+       in.DeepCopyInto(out)
+       return out
+}
diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/zz_generated.openapi.go
new file mode 100644 (file)
index 0000000..558f68a
--- /dev/null
@@ -0,0 +1,85 @@
+// +build !ignore_autogenerated
+
+// This file was autogenerated by openapi-gen. Do not edit it manually!
+
+package v1alpha1
+
+import (
+       spec "github.com/go-openapi/spec"
+       common "k8s.io/kube-openapi/pkg/common"
+)
+
+func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
+       return map[string]common.OpenAPIDefinition{
+               "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.Provisioning":       schema_pkg_apis_bpa_v1alpha1_Provisioning(ref),
+               "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec":   schema_pkg_apis_bpa_v1alpha1_ProvisioningSpec(ref),
+               "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus": schema_pkg_apis_bpa_v1alpha1_ProvisioningStatus(ref),
+       }
+}
+
+func schema_pkg_apis_bpa_v1alpha1_Provisioning(ref common.ReferenceCallback) common.OpenAPIDefinition {
+       return common.OpenAPIDefinition{
+               Schema: spec.Schema{
+                       SchemaProps: spec.SchemaProps{
+                               Description: "Provisioning is the Schema for the provisionings API",
+                               Properties: map[string]spec.Schema{
+                                       "kind": {
+                                               SchemaProps: spec.SchemaProps{
+                                                       Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
+                                                       Type:        []string{"string"},
+                                                       Format:      "",
+                                               },
+                                       },
+                                       "apiVersion": {
+                                               SchemaProps: spec.SchemaProps{
+                                                       Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
+                                                       Type:        []string{"string"},
+                                                       Format:      "",
+                                               },
+                                       },
+                                       "metadata": {
+                                               SchemaProps: spec.SchemaProps{
+                                                       Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+                                               },
+                                       },
+                                       "spec": {
+                                               SchemaProps: spec.SchemaProps{
+                                                       Ref: ref("github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec"),
+                                               },
+                                       },
+                                       "status": {
+                                               SchemaProps: spec.SchemaProps{
+                                                       Ref: ref("github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus"),
+                                               },
+                                       },
+                               },
+                       },
+               },
+               Dependencies: []string{
+                       "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningSpec", "github.com/bpa-operator/pkg/apis/bpa/v1alpha1.ProvisioningStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+       }
+}
+
+func schema_pkg_apis_bpa_v1alpha1_ProvisioningSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
+       return common.OpenAPIDefinition{
+               Schema: spec.Schema{
+                       SchemaProps: spec.SchemaProps{
+                               Description: "ProvisioningSpec defines the desired state of Provisioning",
+                               Properties:  map[string]spec.Schema{},
+                       },
+               },
+               Dependencies: []string{},
+       }
+}
+
+func schema_pkg_apis_bpa_v1alpha1_ProvisioningStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
+       return common.OpenAPIDefinition{
+               Schema: spec.Schema{
+                       SchemaProps: spec.SchemaProps{
+                               Description: "ProvisioningStatus defines the observed state of Provisioning",
+                               Properties:  map[string]spec.Schema{},
+                       },
+               },
+               Dependencies: []string{},
+       }
+}
diff --git a/cmd/bpa-operator/pkg/controller/add_provisioning.go b/cmd/bpa-operator/pkg/controller/add_provisioning.go
new file mode 100644 (file)
index 0000000..4f853cd
--- /dev/null
@@ -0,0 +1,10 @@
+package controller
+
+import (
+       "github.com/bpa-operator/pkg/controller/provisioning"
+)
+
+func init() {
+       // AddToManagerFuncs is a list of functions to create controllers and add them to a manager.
+       AddToManagerFuncs = append(AddToManagerFuncs, provisioning.Add)
+}
diff --git a/cmd/bpa-operator/pkg/controller/controller.go b/cmd/bpa-operator/pkg/controller/controller.go
new file mode 100644 (file)
index 0000000..7c069f3
--- /dev/null
@@ -0,0 +1,18 @@
+package controller
+
+import (
+       "sigs.k8s.io/controller-runtime/pkg/manager"
+)
+
+// AddToManagerFuncs is a list of functions to add all Controllers to the Manager
+var AddToManagerFuncs []func(manager.Manager) error
+
+// AddToManager adds all Controllers to the Manager
+func AddToManager(m manager.Manager) error {
+       for _, f := range AddToManagerFuncs {
+               if err := f(m); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
new file mode 100644 (file)
index 0000000..8031cd3
--- /dev/null
@@ -0,0 +1,353 @@
+package provisioning
+
+import (
+       "bytes"
+       "context"
+       "fmt"
+       "io/ioutil"
+       "os"
+       "os/exec"
+       "os/user"
+       "path/filepath"
+       "regexp"
+       "strings"
+
+       bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
+       "gopkg.in/ini.v1"
+       "k8s.io/apimachinery/pkg/api/errors"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+       "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+       "k8s.io/apimachinery/pkg/runtime"
+       "k8s.io/apimachinery/pkg/runtime/schema"
+       "k8s.io/client-go/dynamic"
+       "k8s.io/client-go/tools/clientcmd"
+       "sigs.k8s.io/controller-runtime/pkg/client"
+       "sigs.k8s.io/controller-runtime/pkg/controller"
+       "sigs.k8s.io/controller-runtime/pkg/handler"
+       "sigs.k8s.io/controller-runtime/pkg/manager"
+       "sigs.k8s.io/controller-runtime/pkg/reconcile"
+       logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
+       "sigs.k8s.io/controller-runtime/pkg/source"
+)
+
+var log = logf.Log.WithName("controller_provisioning")
+//Todo: Should be an input from the user
+var dhcpLeaseFile = "/var/lib/dhcp/dhcpd.leases"
+var kudInstallerScript = "/root/icn/deploy/kud/multicloud-k8s/kud/hosting_providers/vagrant"
+
+/**
+* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
+* business logic.  Delete these comments after modifying this file.*
+ */
+
+// Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
+// and Start it when the Manager is Started.
+func Add(mgr manager.Manager) error {
+       return add(mgr, newReconciler(mgr))
+}
+
+// newReconciler returns a new reconcile.Reconciler
+func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+       return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+}
+
+// add adds a new Controller to mgr with r as the reconcile.Reconciler
+func add(mgr manager.Manager, r reconcile.Reconciler) error {
+       // Create a new controller
+       c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
+       if err != nil {
+               return err
+       }
+
+       // Watch for changes to primary resource Provisioning
+       err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
+       if err != nil {
+               return err
+       }
+
+
+       return nil
+}
+
+// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
+var _ reconcile.Reconciler = &ReconcileProvisioning{}
+
+// ReconcileProvisioning reconciles a Provisioning object
+type ReconcileProvisioning struct {
+       // This client, initialized using mgr.Client() above, is a split client
+       // that reads objects from the cache and writes to the apiserver
+       client client.Client
+       scheme *runtime.Scheme
+}
+
+// Reconcile reads the state of the cluster for a Provisioning object and makes changes based on the state read
+// and what is in the Provisioning.Spec
+// TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
+// a Pod as an example
+// Note:
+// The Controller will requeue the Request to be processed again if the returned error is non-nil or
+// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
+func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+       reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
+       reqLogger.Info("Reconciling Provisioning")
+
+       // Fetch the Provisioning instance
+       provisioningInstance := &bpav1alpha1.Provisioning{}
+       err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
+       if err != nil {
+               if errors.IsNotFound(err) {
+                       // Request object not found, could have been deleted after reconcile request.
+                       // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+                       // Return and don't requeue
+                       return reconcile.Result{}, nil
+               }
+               // Error reading the object - requeue the request.
+               return reconcile.Result{}, err
+       }
+
+        mastersList := provisioningInstance.Spec.Masters
+        workersList := provisioningInstance.Spec.Workers
+        bareMetalHostList, _ := listBareMetalHosts()
+
+
+        var allString string
+        var masterString string
+        var workerString string
+
+       //Iterate through mastersList and get all the mac addresses and IP addresses
+
+        for _, masterMap := range mastersList {
+
+                for masterLabel, master := range masterMap {
+                   containsMac, bmhCR := checkMACaddress(bareMetalHostList, master.MACaddress)
+                   if containsMac{
+                      //fmt.Println( master.MACaddress)
+                      fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, master.MACaddress)
+
+                      //Get IP address of master
+                      hostIPaddress, err := getHostIPaddress(master.MACaddress, dhcpLeaseFile )
+                     if err != nil {
+                        fmt.Printf("IP address not found for host with MAC address %s \n", master.MACaddress)
+                     }
+
+
+
+                      allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                      masterString += masterLabel + "\n"
+
+                      fmt.Printf("%s : %s \n", hostIPaddress, master.MACaddress)
+
+
+
+                   } else {
+
+                      fmt.Printf("Host with MAC Address %s not found\n", master.MACaddress)
+                   }
+             }
+        }
+
+
+        //Iterate through workersList and get all the mac addresses
+        for _, workerMap := range workersList {
+
+                for workerLabel, worker := range workerMap {
+                   containsMac, bmhCR := checkMACaddress(bareMetalHostList, worker.MACaddress)
+                   if containsMac{
+                      //fmt.Println( worker.MACaddress)
+                      fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, worker.MACaddress)
+
+                      //Get IP address of worker
+                      hostIPaddress, err := getHostIPaddress(worker.MACaddress, dhcpLeaseFile )
+                     if err != nil {
+                        fmt.Printf("IP address not found for host with MAC address %s \n", worker.MACaddress)
+                     }
+                      fmt.Printf("%s : %s \n", hostIPaddress, worker.MACaddress)
+
+                      allString += workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                      workerString += workerLabel + "\n"
+
+                   }else {
+
+                      fmt.Printf("Host with MAC Address %s not found\n", worker.MACaddress)
+                   }
+
+             }
+        }
+
+
+        //Create host.ini file
+        iniHostFilePath := provisioningInstance.Spec.HostsFile
+        newFile, err := os.Create(iniHostFilePath)
+        defer newFile.Close()
+
+
+        if err != nil {
+           fmt.Printf("Error occurred while creating file \n %v", err)
+        }
+
+        hostFile, err := ini.Load(iniHostFilePath)
+        if err != nil {
+           fmt.Printf("Error occurred while loading file \n %v", err)
+        }
+
+        _, err = hostFile.NewRawSection("all", allString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+        }
+        _, err = hostFile.NewRawSection("kube-master", masterString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+        }
+
+        _, err = hostFile.NewRawSection("kube-node", workerString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+        }
+
+        _, err = hostFile.NewRawSection("etcd", masterString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+        }
+
+        _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+        }
+
+
+        hostFile.SaveTo(iniHostFilePath)
+
+       //TODO: Test KUD installer part
+       //Copy host.ini file to the right path and install KUD
+       dstIniPath := kudInstallerScript + "/inventory/hosts.ini"
+       kudInstaller(iniHostFilePath, dstIniPath, kudInstallerScript)
+
+       return reconcile.Result{}, nil
+}
+
+
+//Function to Get List containing baremetal hosts
+func listBareMetalHosts() (*unstructured.UnstructuredList, error) {
+
+     //Get Current User and kube config file
+     usr, err := user.Current()
+     if err != nil {
+        fmt.Println("Could not get current user\n")
+        return &unstructured.UnstructuredList{}, err
+     }
+
+     kubeConfig := filepath.Join(usr.HomeDir, ".kube", "config")
+
+     //Build Config Flags
+     config, err :=  clientcmd.BuildConfigFromFlags("", kubeConfig)
+     if err != nil {
+        fmt.Println("Could not build config\n")
+        return &unstructured.UnstructuredList{}, err
+     }
+
+    //Create Dynamic Client  for BareMetalHost CRD
+    bmhDynamicClient, err := dynamic.NewForConfig(config)
+
+    if err != nil {
+       fmt.Println("Could not create dynamic client for bareMetalHosts\n")
+       return &unstructured.UnstructuredList{}, err
+    }
+
+    //Create GVR representing a BareMetalHost CR
+    bmhGVR := schema.GroupVersionResource{
+      Group:    "metal3.io",
+      Version:  "v1alpha1",
+      Resource: "baremetalhosts",
+    }
+
+    //Get List containing all BareMetalHosts CRs
+    bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
+    if err != nil {
+       fmt.Println("Error occurred, cannot get BareMetalHosts list\n")
+       return &unstructured.UnstructuredList{}, err
+    }
+
+    return bareMetalHosts, nil
+}
+
+//Function to check if BareMetalHost containing MAC address exist
+func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
+
+     //Convert macAddress to byte array for comparison
+     macAddressByte :=  []byte(macAddress)
+     macBool := false
+
+     for _, bareMetalHost := range bareMetalHostList.Items {
+         bmhJson, _ := bareMetalHost.MarshalJSON()
+
+         macBool = bytes.Contains(bmhJson, macAddressByte)
+         if macBool{
+             return macBool, bareMetalHost.GetName()
+         }
+
+      }
+
+         return macBool, ""
+
+}
+
+func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, error) {
+
+     //Read the dhcp lease file
+     dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
+     if err != nil {
+        fmt.Println("Failed to read lease file\n")
+        return "", err
+     }
+
+     dhcpLeases := string(dhcpFile)
+
+     //Regex to use to search dhcpLeases
+     regex := "lease.*{|ethernet.*"
+     re, err := regexp.Compile(regex)
+     if err != nil {
+        fmt.Println("Could not create Regexp object\n")
+        return "", err
+     }
+
+     //Get string containing leased IP addresses and corresponding MAC addresses
+     out := re.FindAllString(dhcpLeases, -1)
+     outString := strings.Join(out, " ")
+     stringReplacer := strings.NewReplacer("lease", "", "{ ethernet ", "", ";", "")
+     replaced := stringReplacer.Replace(outString)
+     ipMacList := strings.Fields(replaced)
+
+
+     //Get IP addresses corresponding to Input MAC Address
+     for idx := len(ipMacList) - 1; idx >= 0; idx-- {
+         item := ipMacList[idx]
+         if item == macAddress {
+             ipAdd := ipMacList[idx-1]
+             return ipAdd, nil
+         }
+     }
+
+     return "", nil
+}
+
+func kudInstaller(srcIniPath, dstIniPath, kudInstallerPath string) {
+
+      err := os.Chdir(kudInstallerPath)
+      if err != nil {
+          fmt.Printf("Could not change directory %v", err)
+          return
+        }
+
+      commands := "cp " + srcIniPath + " "  + dstIniPath + "; ./installer.sh| tee kud_installer.log"
+
+      cmd := exec.Command("/bin/bash", "-c", commands)
+      err = cmd.Run()
+
+      if err != nil {
+          fmt.Printf("Error occurred while running KUD install scripts %v", err)
+          return
+        }
+
+      return
+}
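
A hypothetical unit-test sketch for getHostIPaddress, illustrating the dhcpd.leases parsing above; the lease fragment and MAC address are made up, and no such test is part of this change.

```go
package provisioning

import (
	"io/ioutil"
	"os"
	"testing"
)

// TestGetHostIPaddress writes a made-up lease fragment to a temp file and
// checks that the IP paired with the given MAC address is returned.
func TestGetHostIPaddress(t *testing.T) {
	leases := "lease 192.168.1.21 {\n  hardware ethernet 00:1e:67:00:00:01;\n}\n"
	tmp, err := ioutil.TempFile("", "dhcpd-leases")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(tmp.Name())
	if _, err := tmp.WriteString(leases); err != nil {
		t.Fatal(err)
	}
	tmp.Close()

	ip, err := getHostIPaddress("00:1e:67:00:00:01", tmp.Name())
	if err != nil {
		t.Fatal(err)
	}
	if ip != "192.168.1.21" {
		t.Errorf("expected 192.168.1.21, got %q", ip)
	}
}
```
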
diff --git a/cmd/bpa-operator/tools.go b/cmd/bpa-operator/tools.go
new file mode 100644 (file)
index 0000000..648413f
--- /dev/null
@@ -0,0 +1,7 @@
+// +build tools
+
+package tools
+
+import (
+       _ "sigs.k8s.io/controller-tools/pkg/crd/generator"
+)
diff --git a/cmd/bpa-operator/version/version.go b/cmd/bpa-operator/version/version.go
new file mode 100644 (file)
index 0000000..e3e130b
--- /dev/null
@@ -0,0 +1,5 @@
+package version
+
+var (
+       Version = "0.0.1"
+)
diff --git a/cmd/bpa-restapi-agent/Makefile b/cmd/bpa-restapi-agent/Makefile
new file mode 100644 (file)
index 0000000..ba40a6f
--- /dev/null
@@ -0,0 +1,24 @@
+
+# The name of the executable (default is current directory name)
+TARGET := $(shell echo $${PWD\#\#*/})
+.DEFAULT_GOAL: $(TARGET)
+
+# These will be provided to the target
+VERSION := 1.0.0
+BUILD := `git rev-parse HEAD`
+
+# Use linker flags to provide version/build settings to the target
+LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD)"
+
+# go source files, ignore vendor directory
+SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
+
+.PHONY: all build
+
+all: build
+
+$(TARGET): $(SRC)
+       @go build $(LDFLAGS) -o $(TARGET)
+
+build: $(TARGET)
+       @true
diff --git a/cmd/bpa-restapi-agent/README.md b/cmd/bpa-restapi-agent/README.md
index e69de29..a74434c 100644 (file)
@@ -0,0 +1,6 @@
+### Running the server
+To run the server, follow these simple steps:
+
+```
+go run main.go
+```
diff --git a/cmd/bpa-restapi-agent/api/api.go b/cmd/bpa-restapi-agent/api/api.go
new file mode 100644 (file)
index 0000000..d70cd80
--- /dev/null
@@ -0,0 +1,54 @@
+// api/api.go
+
+
+package api
+
+import (
+       image "bpa-restapi-agent/internal/app"
+
+       "github.com/gorilla/mux"
+)
+
+// NewRouter creates a router that registers the various urls that are supported
+func NewRouter(binaryClient image.ImageManager,
+       containerClient image.ImageManager,
+       osClient image.ImageManager) *mux.Router {
+
+       router := mux.NewRouter()
+
+       //Setup the binary image upload api handler here
+       if binaryClient == nil {
+               binaryClient = image.NewBinaryImageClient()
+       }
+       binaryHandler := imageHandler{client: binaryClient}
+       imgRouter := router.PathPrefix("/v1").Subrouter()
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/binary_images", binaryHandler.createHandler).Methods("POST")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/binary_images/{imgname}", binaryHandler.getHandler).Methods("GET")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/binary_images/{imgname}", binaryHandler.deleteHandler).Methods("DELETE")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/binary_images/{imgname}", binaryHandler.updateHandler).Methods("PUT")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/binary_images/{imgname}", binaryHandler.patchHandler).Methods("PATCH")
+
+       //Setup the container_image upload api handler here
+       if containerClient == nil {
+               containerClient = image.NewContainerImageClient()
+       }
+       containerHandler := imageHandler{client: containerClient}
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/container_images", containerHandler.createHandler).Methods("POST")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/container_images/{imgname}", containerHandler.getHandler).Methods("GET")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/container_images/{imgname}", containerHandler.deleteHandler).Methods("DELETE")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/container_images/{imgname}", containerHandler.updateHandler).Methods("PUT")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/container_images/{imgname}", containerHandler.patchHandler).Methods("PATCH")
+
+       //Setup the os_image upload api handler here
+       if osClient == nil {
+               osClient = image.NewOSImageClient()
+       }
+       osHandler := imageHandler{client: osClient}
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/os_images", osHandler.createHandler).Methods("POST")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/os_images/{imgname}", osHandler.getHandler).Methods("GET")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/os_images/{imgname}", osHandler.deleteHandler).Methods("DELETE")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/os_images/{imgname}", osHandler.updateHandler).Methods("PUT")
+       imgRouter.HandleFunc("/baremetalcluster/{owner}/{clustername}/os_images/{imgname}", osHandler.patchHandler).Methods("PATCH")
+
+       return router
+}
diff --git a/cmd/bpa-restapi-agent/api/imagehandler.go b/cmd/bpa-restapi-agent/api/imagehandler.go
new file mode 100644 (file)
index 0000000..0d7b787
--- /dev/null
@@ -0,0 +1,348 @@
+package api
+
+import (
+       "bytes"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "os"
+       "os/user"
+       "log"
+       "path"
+       "strconv"
+
+       image "bpa-restapi-agent/internal/app"
+
+       "github.com/gorilla/mux"
+)
+
+// imageHandler is used to store backend implementation objects
+// Also simplifies mocking for unit testing purposes
+type imageHandler struct {
+       // Interface that implements Image operations
+       // We will set this variable with a mock interface for testing
+       client image.ImageManager
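+	// dirPath holds the directory on the local file system where uploaded image files are written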
+       dirPath string
+}
+
+// createHandler handles creation of the image entry in the database
+func (h imageHandler) createHandler(w http.ResponseWriter, r *http.Request) {
+       var v image.Image
+
+	// Implementation using multipart form
+	// Review and enable/remove at a later date
+	// Set max size to 16 MB here
+       err := r.ParseMultipartForm(16777216)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusUnprocessableEntity)
+               return
+       }
+
+       jsn := bytes.NewBuffer([]byte(r.FormValue("metadata")))
+       err = json.NewDecoder(jsn).Decode(&v)
+       switch {
+       case err == io.EOF:
+               http.Error(w, "Empty body", http.StatusBadRequest)
+               return
+       case err != nil:
+               http.Error(w, err.Error(), http.StatusUnprocessableEntity)
+               return
+       }
+
+       // Name is required.
+       if v.ImageName == "" {
+               http.Error(w, "Missing name in POST request", http.StatusBadRequest)
+               return
+       }
+
+       // Owner is required.
+       if v.Owner == "" {
+               http.Error(w, "Missing Owner in POST request", http.StatusBadRequest)
+               return
+       }
+
+       if v.ImageLength == 0 {
+               e := "Improper upload length"
+               w.WriteHeader(http.StatusBadRequest)
+               w.Write([]byte(e))
+               return
+       }
+
+	//Create the file server directory for this image type
+	dir, err := createFileDir(v.Type)
+	if err != nil {
+		log.Println("Error creating file server directory", err)
+		http.Error(w, "Unable to create file server directory", http.StatusInternalServerError)
+		return
+	}
+
+       //Read the file section and ignore the header
+       file, _, err := r.FormFile("file")
+       if err != nil {
+               http.Error(w, "Unable to process file", http.StatusUnprocessableEntity)
+               return
+       }
+
+       defer file.Close()
+
+       //Convert the file content to base64 for storage
+       content, err := ioutil.ReadAll(file)
+       if err != nil {
+               http.Error(w, "Unable to read file", http.StatusUnprocessableEntity)
+               return
+       }
+
+       v.Config = base64.StdEncoding.EncodeToString(content)
+
+       ret, err := h.client.Create(v)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+       h.dirPath = dir
+       filePath := path.Join(h.dirPath, v.ImageName)
+       file1, err := os.Create(filePath)
+       if err != nil {
+               e := "Error creating file in filesystem"
+               log.Printf("%s %s\n", e, err)
+               w.WriteHeader(http.StatusInternalServerError)
+               return
+       }
+
+       defer file1.Close()
+
+       w.Header().Set("Content-Type", "application/json")
+       w.WriteHeader(http.StatusCreated)
+       err = json.NewEncoder(w).Encode(ret)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+}
+
+// createFileDir creates the directory for storing uploaded images of the
+// given type under the current user's home directory
+func createFileDir(dirName string) (string, error) {
+	u, err := user.Current()
+	if err != nil {
+		log.Println("Error while fetching user home directory", err)
+		return "", err
+	}
+	home := u.HomeDir
+	dirPath := path.Join(home, "images", dirName)
+	err = os.MkdirAll(dirPath, 0744)
+	if err != nil {
+		log.Println("Error while creating file server directory", err)
+		return "", err
+	}
+	return dirPath, nil
+}
+
+// getHandler handles GET operations on a particular name
+// Returns an Image
+func (h imageHandler) getHandler(w http.ResponseWriter, r *http.Request) {
+       vars := mux.Vars(r)
+       // ownerName := vars["owner"]
+       // clusterName := vars["clustername"]
+       imageName := vars["imgname"]
+
+       ret, err := h.client.Get(imageName)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+
+       w.Header().Set("Content-Type", "application/json")
+       w.WriteHeader(http.StatusOK)
+       err = json.NewEncoder(w).Encode(ret)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+}
+
+// deleteHandler handles DELETE operations on a particular record
+func (h imageHandler) deleteHandler(w http.ResponseWriter, r *http.Request) {
+       vars := mux.Vars(r)
+       // ownerName := vars["owner"]
+       // clusterName := vars["clustername"]
+       imageName := vars["imgname"]
+
+       err := h.client.Delete(imageName)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+
+       w.WriteHeader(http.StatusNoContent)
+}
+
+// updateHandler handles update operations on a particular image
+func (h imageHandler) updateHandler(w http.ResponseWriter, r *http.Request) {
+       var v image.Image
+       vars := mux.Vars(r)
+       imageName := vars["imgname"]
+
+       err := r.ParseMultipartForm(16777216)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusUnprocessableEntity)
+               return
+       }
+
+       jsn := bytes.NewBuffer([]byte(r.FormValue("metadata")))
+       err = json.NewDecoder(jsn).Decode(&v)
+       switch {
+       case err == io.EOF:
+               http.Error(w, "Empty body", http.StatusBadRequest)
+               return
+       case err != nil:
+               http.Error(w, err.Error(), http.StatusUnprocessableEntity)
+               return
+       }
+
+       // Name is required.
+       if v.ImageName == "" {
+               http.Error(w, "Missing name in PUT request", http.StatusBadRequest)
+               return
+       }
+
+       // Owner is required.
+       if v.Owner == "" {
+               http.Error(w, "Missing Owner in PUT request", http.StatusBadRequest)
+               return
+       }
+
+       //Read the file section and ignore the header
+       file, _, err := r.FormFile("file")
+       if err != nil {
+               http.Error(w, "Unable to process file", http.StatusUnprocessableEntity)
+               return
+       }
+
+       defer file.Close()
+
+       //Convert the file content to base64 for storage
+       content, err := ioutil.ReadAll(file)
+       if err != nil {
+               http.Error(w, "Unable to read file", http.StatusUnprocessableEntity)
+               return
+       }
+
+       v.Config = base64.StdEncoding.EncodeToString(content)
+
+       ret, err := h.client.Update(imageName, v)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+
+       w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+       err = json.NewEncoder(w).Encode(ret)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+}
+
+// patchHandler handles chunked file uploads; each PATCH carries the next
+// chunk of the image at the offset given in the Upload-Offset header
+func (h imageHandler) patchHandler(w http.ResponseWriter, r *http.Request) {
+       log.Println("going to patch file")
+       vars := mux.Vars(r)
+       imageName := vars["imgname"]
+       file, err := h.client.Get(imageName)
+       if err != nil {
+               http.Error(w, err.Error(), http.StatusInternalServerError)
+               return
+       }
+	if *file.UploadComplete {
+               e := "Upload already completed"
+               w.WriteHeader(http.StatusUnprocessableEntity)
+               w.Write([]byte(e))
+               return
+       }
+       off, err := strconv.Atoi(r.Header.Get("Upload-Offset"))
+       if err != nil {
+               log.Println("Improper upload offset", err)
+               w.WriteHeader(http.StatusBadRequest)
+               return
+       }
+       log.Printf("Upload offset %d\n", off)
+       if *file.ImageOffset != off {
+               e := fmt.Sprintf("Expected Offset %d got offset %d", *file.ImageOffset, off)
+               w.WriteHeader(http.StatusConflict)
+               w.Write([]byte(e))
+               return
+       }
+
+       log.Println("Content length is", r.Header.Get("Content-Length"))
+       clh := r.Header.Get("Content-Length")
+       cl, err := strconv.Atoi(clh)
+       if err != nil {
+               log.Println("unknown content length")
+               w.WriteHeader(http.StatusInternalServerError)
+               return
+       }
+
+       if cl != (file.ImageLength - *file.ImageOffset) {
+               e := fmt.Sprintf("Content length doesn't match upload length. Expected content length %d got %d", file.ImageLength-*file.ImageOffset, cl)
+               log.Println(e)
+               w.WriteHeader(http.StatusBadRequest)
+               w.Write([]byte(e))
+               return
+       }
+
+       body, err := ioutil.ReadAll(r.Body)
+       if err != nil {
+               log.Printf("Received file partially %s\n", err)
+               log.Println("Size of received file ", len(body))
+       }
+
+	u, err := user.Current()
+	if err != nil {
+		log.Println("Error while fetching user home directory", err)
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+       home := u.HomeDir
+       dir := path.Join(home, "images", file.Type)
+       h.dirPath = dir
+       fp := fmt.Sprintf("%s/%s", h.dirPath, imageName)
+       f, err := os.OpenFile(fp, os.O_APPEND|os.O_WRONLY, 0644)
+       if err != nil {
+               log.Printf("unable to open file %s\n", err)
+               w.WriteHeader(http.StatusInternalServerError)
+               return
+       }
+       defer f.Close()
+
+       n, err := f.WriteAt(body, int64(off))
+       if err != nil {
+               log.Printf("unable to write %s", err)
+               w.WriteHeader(http.StatusInternalServerError)
+               return
+       }
+       log.Println("number of bytes written ", n)
+       no := *file.ImageOffset + n
+       file.ImageOffset = &no
+
+       uo := strconv.Itoa(*file.ImageOffset)
+       w.Header().Set("Upload-Offset", uo)
+       if *file.ImageOffset == file.ImageLength {
+               log.Println("upload completed successfully")
+               *file.UploadComplete = true
+       }
+
+       // err = h.updateFile(file)
+       // if err != nil {
+       //      log.Println("Error while updating file", err)
+       //      w.WriteHeader(http.StatusInternalServerError)
+       //      return
+       // }
+	w.WriteHeader(http.StatusNoContent)
+}
diff --git a/cmd/bpa-restapi-agent/docs/swagger.yaml b/cmd/bpa-restapi-agent/docs/swagger.yaml
new file mode 100644 (file)
index 0000000..a375700
--- /dev/null
@@ -0,0 +1,137 @@
+---
+swagger: "2.0"
+info:
+  description: "Addresses deployment of workloads at the edge"
+  version: "1.0.0"
+  title: "ICN application"
+schemes:
+- "http"
+consumes:
+- "application/json"
+produces:
+- "application/json"
+paths:
+  /:
+    get:
+      tags:
+      - "container_images"
+      operationId: "find_images"
+      parameters:
+      - name: "since"
+        in: "query"
+        required: false
+        type: "integer"
+        format: "int64"
+        x-exportParamName: "Since"
+        x-optionalDataType: "Int64"
+      - name: "limit"
+        in: "query"
+        required: false
+        type: "integer"
+        default: 20
+        format: "int32"
+        x-exportParamName: "Limit"
+        x-optionalDataType: "Int32"
+      responses:
+        200:
+          description: "list the ICN operations"
+          schema:
+            type: "array"
+            items:
+              $ref: "#/definitions/Request"
+        default:
+          description: "generic error response"
+          schema:
+            $ref: "#/definitions/error"
+    post:
+      tags:
+      - "container_images"
+      operationId: "addContainer"
+      parameters:
+      - in: "body"
+        name: "body"
+        required: false
+        schema:
+          $ref: "#/definitions/Request"
+        x-exportParamName: "Body"
+      responses:
+        201:
+          description: "Created"
+          schema:
+            $ref: "#/definitions/Request"
+        default:
+          description: "error"
+          schema:
+            $ref: "#/definitions/error"
+  /{id}:
+    put:
+      tags:
+      - "container_images"
+      operationId: "updateImage"
+      parameters:
+      - name: "id"
+        in: "path"
+        required: true
+        type: "integer"
+        format: "int64"
+        x-exportParamName: "Id"
+      - in: "body"
+        name: "body"
+        required: false
+        schema:
+          $ref: "#/definitions/Request"
+        x-exportParamName: "Body"
+      responses:
+        200:
+          description: "OK"
+          schema:
+            $ref: "#/definitions/Request"
+        default:
+          description: "error"
+          schema:
+            $ref: "#/definitions/error"
+    delete:
+      tags:
+      - "container_images"
+      operationId: "destroyImage"
+      parameters:
+      - name: "id"
+        in: "path"
+        required: true
+        type: "integer"
+        format: "int64"
+        x-exportParamName: "Id"
+      responses:
+        204:
+          description: "Deleted"
+        default:
+          description: "error"
+          schema:
+            $ref: "#/definitions/error"
+definitions:
+  Request:
+    type: "object"
+    properties:
+      image_id:
+        type: "string"
+      repo:
+        type: "string"
+      tag:
+        type: "string"
+      installed:
+        type: "boolean"
+    example:
+      installed: true
+      repo: "repo"
+      tag: "tag"
+      image_id: "image_id"
+  error:
+    type: "object"
+    required:
+    - "message"
+    properties:
+      code:
+        type: "integer"
+        format: "int64"
+      message:
+        type: "string"
diff --git a/cmd/bpa-restapi-agent/go.mod b/cmd/bpa-restapi-agent/go.mod
new file mode 100644 (file)
index 0000000..5f99a12
--- /dev/null
@@ -0,0 +1,16 @@
+module bpa-restapi-agent
+
+go 1.12
+
+require (
+       github.com/go-stack/stack v1.8.0 // indirect
+       github.com/golang/snappy v0.0.1 // indirect
+       github.com/gorilla/handlers v1.4.2
+       github.com/gorilla/mux v1.7.3
+       github.com/pkg/errors v0.8.1
+       github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
+       github.com/xdg/stringprep v1.0.0 // indirect
+       go.mongodb.org/mongo-driver v1.0.4
+       golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
+       golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect
+)
diff --git a/cmd/bpa-restapi-agent/go.sum b/cmd/bpa-restapi-agent/go.sum
new file mode 100644 (file)
index 0000000..50f7957
--- /dev/null
@@ -0,0 +1,25 @@
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
+github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
+go.mongodb.org/mongo-driver v1.0.4 h1:bHxbjH6iwh1uInchXadI6hQR107KEbgYsMzoblDONmQ=
+go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/cmd/bpa-restapi-agent/internal/app/image.go b/cmd/bpa-restapi-agent/internal/app/image.go
new file mode 100644 (file)
index 0000000..a1beed8
--- /dev/null
@@ -0,0 +1,194 @@
+package app
+
+import (
+  //"encoding/base64"
+       "encoding/json"
+       //"io/ioutil"
+
+       "bpa-restapi-agent/internal/db"
+
+       pkgerrors "github.com/pkg/errors"
+)
+
+// Image contains the parameters needed for Image information
+type Image struct {
+	Owner          string          `json:"owner"`
+	ClusterName    string          `json:"cluster_name"`
+	Type           string          `json:"type"`
+	ImageName      string          `json:"image_name"`
+	Config         string          `json:"config"`
+	ImageOffset    *int            `json:"image_offset"`
+	ImageLength    int             `json:"image_length"`
+	UploadComplete *bool           `json:"upload_complete"`
+	Description    ImageRecordList `json:"description"`
+}
+
+type ImageRecordList struct {
+       ImageRecords []map[string]string `json:"image_records"`
+}
+
+// ImageKey is the key structure that is used in the database
+type ImageKey struct {
+       // Owner            string     `json:"owner"`
+       // ClusterName      string     `json:"cluster_name"`
+       ImageName        string     `json:"image_name"`
+}
+
+// We will use json marshalling to convert to string to
+// preserve the underlying structure.
+func (dk ImageKey) String() string {
+       out, err := json.Marshal(dk)
+       if err != nil {
+               return ""
+       }
+
+       return string(out)
+}
+
+// ImageManager is an interface that exposes the Image functionality
+type ImageManager interface {
+       Create(c Image) (Image, error)
+       Get(imageName string) (Image, error)
+       Delete(imageName string) error
+       Update(imageName string, c Image) (Image, error)
+       GetImageRecordByName(imgname, imageName string) (map[string]string, error)
+}
+
+// ImageClient implements the ImageManager
+// It will also be used to maintain some localized state
+type ImageClient struct {
+       storeName string
+       tagMeta   string
+}
+
+// To Do - Fix repetition in the constructors below
+// NewBinaryImageClient, NewContainerImageClient and NewOSImageClient return
+// ImageClient instances, which implement the ImageManager interface
+func NewBinaryImageClient() *ImageClient {
+       return &ImageClient{
+               storeName: "binary_image",
+               tagMeta:   "metadata",
+       }
+}
+
+func NewContainerImageClient() *ImageClient {
+       return &ImageClient{
+               storeName: "container_image",
+               tagMeta:   "metadata",
+       }
+}
+
+func NewOSImageClient() *ImageClient {
+       return &ImageClient{
+               storeName: "os_image",
+               tagMeta:   "metadata",
+       }
+}
+
+// Create an entry for the Image resource in the database
+func (v *ImageClient) Create(c Image) (Image, error) {
+
+       //Construct composite key consisting of name
+       key := ImageKey{
+               // Owner:       c.Owner,
+               // ClusterName: c.ClusterName,
+               ImageName: c.ImageName,
+       }
+
+       //Check if this Image already exists
+       _, err := v.Get(c.ImageName)
+       if err == nil {
+               return Image{}, pkgerrors.New("Image already exists")
+       }
+
+       err = db.DBconn.Create(v.storeName, key, v.tagMeta, c)
+       if err != nil {
+               return Image{}, pkgerrors.Wrap(err, "Creating DB Entry")
+       }
+
+       return c, nil
+}
+
+// Get returns the Image corresponding to the given image name
+func (v *ImageClient) Get(imageName string) (Image, error) {
+
+       //Construct the composite key to select the entry
+       key := ImageKey{
+               // Owner:       ownerName,
+               // ClusterName: clusterName,
+               ImageName: imageName,
+       }
+
+       value, err := db.DBconn.Read(v.storeName, key, v.tagMeta)
+       if err != nil {
+               return Image{}, pkgerrors.Wrap(err, "Get Image")
+       }
+
+       //value is a byte array
+       if value != nil {
+               c := Image{}
+               err = db.DBconn.Unmarshal(value, &c)
+               if err != nil {
+                       return Image{}, pkgerrors.Wrap(err, "Unmarshaling Value")
+               }
+               return c, nil
+       }
+
+	return Image{}, pkgerrors.New("Error getting Image data")
+}
+
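+// GetImageRecordByName returns the record in the image description whose
+// image_record_name matches imageRecordName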
+func (v *ImageClient) GetImageRecordByName(imgName string,
+       imageRecordName string) (map[string]string, error) {
+
+       img, err := v.Get(imgName)
+       if err != nil {
+               return nil, pkgerrors.Wrap(err, "Error getting image")
+       }
+
+       for _, value := range img.Description.ImageRecords {
+               if imageRecordName == value["image_record_name"] {
+                       return value, nil
+               }
+       }
+
+       return nil, pkgerrors.New("Image record " + imageRecordName + " not found")
+}
+
+// Delete the Image from database
+func (v *ImageClient) Delete(imageName string) error {
+
+       //Construct the composite key to select the entry
+       key := ImageKey{
+               // Owner:       ownerName,
+               // ClusterName: clusterName,
+               ImageName: imageName,
+       }
+       err := db.DBconn.Delete(v.storeName, key, v.tagMeta)
+       if err != nil {
+               return pkgerrors.Wrap(err, "Delete Image")
+       }
+       return nil
+}
+
+// Update an entry for the image in the database
+func (v *ImageClient) Update(imageName string, c Image) (Image, error) {
+
+       key := ImageKey{
+               // Owner:       c.Owner,
+               // ClusterName: c.ClusterName,
+               ImageName: imageName,
+       }
+
+       //Check if this Image exists
+       _, err := v.Get(imageName)
+       if err != nil {
+               return Image{}, pkgerrors.New("Update Error - Image doesn't exist")
+       }
+
+       err = db.DBconn.Update(v.storeName, key, v.tagMeta, c)
+       if err != nil {
+               return Image{}, pkgerrors.Wrap(err, "Updating DB Entry")
+       }
+
+       return c, nil
+}
diff --git a/cmd/bpa-restapi-agent/internal/config/config.go b/cmd/bpa-restapi-agent/internal/config/config.go
new file mode 100644 (file)
index 0000000..e5d4f48
--- /dev/null
@@ -0,0 +1,56 @@
+package config
+
+import (
+  "encoding/json"
+  "log"
+  "os"
+)
+
+type Configuration struct {
+  Password        string  `json:"password"`
+  DatabaseAddress string  `json:"database-address"`
+  DatabaseType    string  `json:"database-type"`
+  ServicePort     string  `json:"service-port"`
+}
+
+var gConfig *Configuration
+
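+// readConfigFile decodes the JSON configuration in the given file,
+// falling back to the defaults if the file cannot be opened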
+func readConfigFile(file string) (*Configuration, error) {
+  f, err := os.Open(file)
+  if err != nil {
+    return defaultConfiguration(), err
+  }
+  defer f.Close()
+
+  conf := defaultConfiguration()
+
+  decoder := json.NewDecoder(f)
+  err = decoder.Decode(conf)
+  if err != nil {
+    return conf, err
+  }
+
+  return conf, nil
+}
+
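+// defaultConfiguration returns the built-in defaults used when no config file is provided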
+func defaultConfiguration() *Configuration {
+  return &Configuration {
+    Password:           "",
+    DatabaseAddress:    "127.0.0.1",
+    DatabaseType:       "mongo",
+    ServicePort:        "9015",
+  }
+}
+
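+// GetConfiguration returns the global configuration, loading ICNconfig.json on first use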
+func GetConfiguration() *Configuration {
+  if gConfig == nil {
+    conf, err := readConfigFile("ICNconfig.json")
+    if err != nil {
+      log.Println("Error loading config file. Using defaults")
+    }
+
+    gConfig = conf
+  }
+
+  return gConfig
+}
diff --git a/cmd/bpa-restapi-agent/internal/db/mongo.go b/cmd/bpa-restapi-agent/internal/db/mongo.go
new file mode 100644 (file)
index 0000000..454f26c
--- /dev/null
@@ -0,0 +1,379 @@
+package db
+
+import (
+       "golang.org/x/net/context"
+       "log"
+
+       "bpa-restapi-agent/internal/config"
+
+       pkgerrors "github.com/pkg/errors"
+       "go.mongodb.org/mongo-driver/bson"
+       "go.mongodb.org/mongo-driver/bson/primitive"
+       "go.mongodb.org/mongo-driver/mongo"
+       "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// MongoCollection defines a subset of MongoDB operations
+// Note: This interface is defined mainly for mock testing
+type MongoCollection interface {
+       InsertOne(ctx context.Context, document interface{},
+               opts ...*options.InsertOneOptions) (*mongo.InsertOneResult, error)
+       FindOne(ctx context.Context, filter interface{},
+               opts ...*options.FindOneOptions) *mongo.SingleResult
+       FindOneAndUpdate(ctx context.Context, filter interface{},
+               update interface{}, opts ...*options.FindOneAndUpdateOptions) *mongo.SingleResult
+       DeleteOne(ctx context.Context, filter interface{},
+               opts ...*options.DeleteOptions) (*mongo.DeleteResult, error)
+       Find(ctx context.Context, filter interface{},
+               opts ...*options.FindOptions) (*mongo.Cursor, error)
+}
+
+// MongoStore is an implementation of the db.Store interface
+type MongoStore struct {
+       db *mongo.Database
+}
+
+// This exists only for allowing us to mock the collection object
+// for testing purposes
+var getCollection = func(coll string, m *MongoStore) MongoCollection {
+       return m.db.Collection(coll)
+}
+
+// This exists only for allowing us to mock the DecodeBytes function
+// Mainly because we cannot construct a SingleResult struct from our
+// tests. All fields in that struct are private.
+var decodeBytes = func(sr *mongo.SingleResult) (bson.Raw, error) {
+       return sr.DecodeBytes()
+}
+
+// These exist only to allow us to mock the cursor.Next and cursor.Close functions
+// Mainly because we cannot construct a mongo.Cursor struct from our
+// tests. All fields in that struct are private and there is no public
+// constructor method.
+var cursorNext = func(ctx context.Context, cursor *mongo.Cursor) bool {
+       return cursor.Next(ctx)
+}
+var cursorClose = func(ctx context.Context, cursor *mongo.Cursor) error {
+       return cursor.Close(ctx)
+}
+
+// NewMongoStore initializes a Mongo Database with the name provided
+// If a database with that name exists, it will be returned
+func NewMongoStore(name string, store *mongo.Database) (Store, error) {
+       if store == nil {
+               ip := "mongodb://" + config.GetConfiguration().DatabaseAddress + ":27017"
+               clientOptions := options.Client()
+               clientOptions.ApplyURI(ip)
+               mongoClient, err := mongo.NewClient(clientOptions)
+               if err != nil {
+                       return nil, err
+               }
+
+               err = mongoClient.Connect(context.Background())
+               if err != nil {
+                       return nil, err
+               }
+               store = mongoClient.Database(name)
+       }
+
+       return &MongoStore{
+               db: store,
+       }, nil
+}
+
+// HealthCheck verifies if the database is up and running
+func (m *MongoStore) HealthCheck() error {
+
+       _, err := decodeBytes(m.db.RunCommand(context.Background(), bson.D{{"serverStatus", 1}}))
+       if err != nil {
+               return pkgerrors.Wrap(err, "Error getting server status")
+       }
+
+       return nil
+}
+
+// validateParams checks to see if any parameters are empty
+func (m *MongoStore) validateParams(args ...interface{}) bool {
+       for _, v := range args {
+               val, ok := v.(string)
+               if ok {
+                       if val == "" {
+                               return false
+                       }
+               } else {
+                       if v == nil {
+                               return false
+                       }
+               }
+       }
+
+       return true
+}
+
+// Create is used to create a DB entry
+func (m *MongoStore) Create(coll string, key Key, tag string, data interface{}) error {
+       if data == nil || !m.validateParams(coll, key, tag) {
+               return pkgerrors.New("No Data to store")
+       }
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       //Insert the data and then add the objectID to the masterTable
+       res, err := c.InsertOne(ctx, bson.D{
+               {tag, data},
+       })
+       if err != nil {
+               return pkgerrors.Errorf("Error inserting into database: %s", err.Error())
+       }
+
+       //Add objectID of created data to masterKey document
+       //Create masterkey document if it does not exist
+       filter := bson.D{{"key", key}}
+
+       _, err = decodeBytes(
+               c.FindOneAndUpdate(
+                       ctx,
+                       filter,
+                       bson.D{
+                               {"$set", bson.D{
+                                       {tag, res.InsertedID},
+                               }},
+                       },
+                       options.FindOneAndUpdate().SetUpsert(true).SetReturnDocument(options.After)))
+
+       if err != nil {
+               return pkgerrors.Errorf("Error updating master table: %s", err.Error())
+       }
+
+       return nil
+}
+
+// Update is used to update a DB entry
+func (m *MongoStore) Update(coll string, key Key, tag string, data interface{}) error {
+       if data == nil || !m.validateParams(coll, key, tag) {
+               return pkgerrors.New("No Data to update")
+       }
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       //Get the masterkey document based on given key
+       filter := bson.D{{"key", key}}
+       keydata, err := decodeBytes(c.FindOne(context.Background(), filter))
+       if err != nil {
+               return pkgerrors.Errorf("Error finding master table: %s", err.Error())
+       }
+
+       //Read the tag objectID from document
+       tagoid, ok := keydata.Lookup(tag).ObjectIDOK()
+       if !ok {
+               return pkgerrors.Errorf("Error finding objectID for tag %s", tag)
+       }
+
+       //Update the document with new data
+       filter = bson.D{{"_id", tagoid}}
+
+       _, err = decodeBytes(
+               c.FindOneAndUpdate(
+                       ctx,
+                       filter,
+                       bson.D{
+                               {"$set", bson.D{
+                                       {tag, data},
+                               }},
+                       },
+                       options.FindOneAndUpdate().SetReturnDocument(options.After)))
+
+       if err != nil {
+               return pkgerrors.Errorf("Error updating record: %s", err.Error())
+       }
+
+       return nil
+}
+
+// Unmarshal implements an unmarshaler for bson data that
+// is produced from the mongo database
+func (m *MongoStore) Unmarshal(inp []byte, out interface{}) error {
+       err := bson.Unmarshal(inp, out)
+       if err != nil {
+               return pkgerrors.Wrap(err, "Unmarshaling bson")
+       }
+       return nil
+}
+
+// Read method returns the data stored for this key and for this particular tag
+func (m *MongoStore) Read(coll string, key Key, tag string) ([]byte, error) {
+       if !m.validateParams(coll, key, tag) {
+               return nil, pkgerrors.New("Mandatory fields are missing")
+       }
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       //Get the masterkey document based on given key
+       filter := bson.D{{"key", key}}
+       keydata, err := decodeBytes(c.FindOne(context.Background(), filter))
+       if err != nil {
+               return nil, pkgerrors.Errorf("Error finding master table: %s", err.Error())
+       }
+
+       //Read the tag objectID from document
+       tagoid, ok := keydata.Lookup(tag).ObjectIDOK()
+       if !ok {
+               return nil, pkgerrors.Errorf("Error finding objectID for tag %s", tag)
+       }
+
+       //Use tag objectID to read the data from store
+       filter = bson.D{{"_id", tagoid}}
+       tagdata, err := decodeBytes(c.FindOne(ctx, filter))
+       if err != nil {
+               return nil, pkgerrors.Errorf("Error reading found object: %s", err.Error())
+       }
+
+       //Return the data as a byte array
+       //Convert string data to byte array using the built-in functions
+       switch tagdata.Lookup(tag).Type {
+       case bson.TypeString:
+               return []byte(tagdata.Lookup(tag).StringValue()), nil
+       default:
+               return tagdata.Lookup(tag).Value, nil
+       }
+}
+
+// Helper function that deletes an object by its ID
+func (m *MongoStore) deleteObjectByID(coll string, objID primitive.ObjectID) error {
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       _, err := c.DeleteOne(ctx, bson.D{{"_id", objID}})
+       if err != nil {
+               return pkgerrors.Errorf("Error Deleting from database: %s", err.Error())
+       }
+
+       log.Printf("Deleted Obj with ID %s", objID.String())
+       return nil
+}
+
+// Delete method removes a document from the Database that matches key
+// TODO: delete all referenced docs if tag is empty string
+func (m *MongoStore) Delete(coll string, key Key, tag string) error {
+       if !m.validateParams(coll, key, tag) {
+               return pkgerrors.New("Mandatory fields are missing")
+       }
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       //Get the masterkey document based on given key
+       filter := bson.D{{"key", key}}
+       //Remove the tag ID entry from masterkey table
+       update := bson.D{
+               {
+                       "$unset", bson.D{
+                               {tag, ""},
+                       },
+               },
+       }
+       keydata, err := decodeBytes(c.FindOneAndUpdate(ctx, filter, update,
+               options.FindOneAndUpdate().SetReturnDocument(options.Before)))
+       if err != nil {
+               //No document was found. Return nil.
+               if err == mongo.ErrNoDocuments {
+                       return nil
+               }
+               //Return any other error that was found.
+               return pkgerrors.Errorf("Error decoding master table after update: %s",
+                       err.Error())
+       }
+
+       //Read the tag objectID from document
+       elems, err := keydata.Elements()
+       if err != nil {
+               return pkgerrors.Errorf("Error reading elements from database: %s", err.Error())
+       }
+
+       tagoid, ok := keydata.Lookup(tag).ObjectIDOK()
+       if !ok {
+               return pkgerrors.Errorf("Error finding objectID for tag %s", tag)
+       }
+
+       //Use tag objectID to read the data from store
+       err = m.deleteObjectByID(coll, tagoid)
+       if err != nil {
+               return pkgerrors.Errorf("Error deleting from database: %s", err.Error())
+       }
+
+       //Delete master table if no more tags left
+       //_id, key and tag should be elements in before doc
+       //if master table needs to be removed too
+       if len(elems) == 3 {
+               keyid, ok := keydata.Lookup("_id").ObjectIDOK()
+               if !ok {
+                       return pkgerrors.Errorf("Error finding objectID for key %s", key)
+               }
+               err = m.deleteObjectByID(coll, keyid)
+               if err != nil {
+                       return pkgerrors.Errorf("Error deleting master table from database: %s", err.Error())
+               }
+       }
+
+       return nil
+}
+
+// ReadAll is used to get all documents in db of a particular tag
+func (m *MongoStore) ReadAll(coll, tag string) (map[string][]byte, error) {
+       if !m.validateParams(coll, tag) {
+               return nil, pkgerrors.New("Missing collection or tag name")
+       }
+
+       c := getCollection(coll, m)
+       ctx := context.Background()
+
+       //Get all master tables in this collection
+       filter := bson.D{
+               {"key", bson.D{
+                       {"$exists", true},
+               }},
+       }
+       cursor, err := c.Find(ctx, filter)
+       if err != nil {
+               return nil, pkgerrors.Errorf("Error reading from database: %s", err.Error())
+       }
+       defer cursorClose(ctx, cursor)
+
+       //Iterate over all the master tables
+       result := make(map[string][]byte)
+       for cursorNext(ctx, cursor) {
+               d := cursor.Current
+
+		//Read key of each master table
+		key, ok := d.Lookup("key").DocumentOK()
+		if !ok {
+			//Skip this entry if the key cannot be read
+			log.Println("Unable to read key from mastertable")
+			continue
+		}
+
+               //Get objectID of tag document
+               tid, ok := d.Lookup(tag).ObjectIDOK()
+               if !ok {
+                       log.Printf("Did not find tag: %s", tag)
+                       continue
+               }
+
+               //Find tag document and unmarshal it into []byte
+               tagData, err := decodeBytes(c.FindOne(ctx, bson.D{{"_id", tid}}))
+               if err != nil {
+                       log.Printf("Unable to decode tag data %s", err.Error())
+                       continue
+               }
+               result[key.String()] = tagData.Lookup(tag).Value
+       }
+
+       if len(result) == 0 {
+               return result, pkgerrors.Errorf("Did not find any objects with tag: %s", tag)
+       }
+
+       return result, nil
+}
diff --git a/cmd/bpa-restapi-agent/internal/db/store.go b/cmd/bpa-restapi-agent/internal/db/store.go
new file mode 100644 (file)
index 0000000..0b981e7
--- /dev/null
@@ -0,0 +1,75 @@
+package db
+
+import (
+       "encoding/json"
+       "reflect"
+
+       pkgerrors "github.com/pkg/errors"
+)
+
+// DBconn is the Store implementation used to talk to the concrete database backend
+var DBconn Store
+
+// Key is an interface that will be implemented by any package
+// that wants to use the Store interface. This allows various
+// db backends and key types.
+type Key interface {
+       String() string
+}
+
+// Store is an interface for accessing a database
+type Store interface {
+       // Returns nil if db health is good
+       HealthCheck() error
+
+       // Unmarshal implements any unmarshaling needed for the database
+       Unmarshal(inp []byte, out interface{}) error
+
+       // Creates a new master table with key and links data with tag and
+       // creates a pointer to the newly added data in the master table
+       Create(table string, key Key, tag string, data interface{}) error
+
+       // Reads data for a particular key with specific tag.
+       Read(table string, key Key, tag string) ([]byte, error)
+
+       // Update data for particular key with specific tag
+       Update(table string, key Key, tag string, data interface{}) error
+
+       // Deletes a specific tag data for key.
+       // TODO: If tag is empty, it will delete all tags under key.
+       Delete(table string, key Key, tag string) error
+
+       // Reads all master tables and data from the specified tag in table
+       ReadAll(table string, tag string) (map[string][]byte, error)
+}
+
+// CreateDBClient creates the DB client
+func CreateDBClient(dbType string) error {
+       var err error
+       switch dbType {
+       case "mongo":
+               // create a mongodb database with ICN as the name
+               DBconn, err = NewMongoStore("icn", nil)
+       default:
+		return pkgerrors.New(dbType + " DB not supported")
+       }
+       return err
+}
+
+// Serialize converts given data into a JSON string
+func Serialize(v interface{}) (string, error) {
+       out, err := json.Marshal(v)
+       if err != nil {
+               return "", pkgerrors.Wrap(err, "Error serializing "+reflect.TypeOf(v).String())
+       }
+       return string(out), nil
+}
+
+// DeSerialize converts a JSON string into the object pointed to by v
+func DeSerialize(str string, v interface{}) error {
+       err := json.Unmarshal([]byte(str), &v)
+       if err != nil {
+               return pkgerrors.Wrap(err, "Error deSerializing "+str)
+       }
+       return nil
+}
diff --git a/cmd/bpa-restapi-agent/internal/utils.go b/cmd/bpa-restapi-agent/internal/utils.go
new file mode 100644 (file)
index 0000000..b590789
--- /dev/null
@@ -0,0 +1,33 @@
+package utils
+
+import(
+  //"log"
+  "bpa-restapi-agent/internal/db"
+  "bpa-restapi-agent/internal/config"
+  pkgerrors "github.com/pkg/errors"
+)
+
+func CheckDatabaseConnection() error {
+// To Do - Implement db and config
+
+  err := db.CreateDBClient(config.GetConfiguration().DatabaseType)
+  if err != nil {
+    return pkgerrors.Cause(err)
+  }
+
+  err = db.DBconn.HealthCheck()
+  if err != nil {
+    return pkgerrors.Cause(err)
+  }
+
+  return nil
+}
+
+func CheckInitialSettings() error {
+  err := CheckDatabaseConnection()
+  if err != nil {
+    return pkgerrors.Cause(err)
+  }
+
+  return nil
+}
diff --git a/cmd/bpa-restapi-agent/main.go b/cmd/bpa-restapi-agent/main.go
new file mode 100644 (file)
index 0000000..6a8960b
--- /dev/null
@@ -0,0 +1,54 @@
+// main.go
+package main
+
+import (
+  "context"
+  "log"
+  "math/rand"
+  "net/http"
+  "os"
+  "os/signal"
+  "time"
+
+  //To Do - Implement internal for checking config
+  "github.com/gorilla/handlers"
+
+  "bpa-restapi-agent/api"
+  utils "bpa-restapi-agent/internal"
+  "bpa-restapi-agent/internal/config"
+)
+
+func main() {
+  // To Do - Implement initial settings
+  // check initial config
+  err := utils.CheckInitialSettings()
+  if err != nil{
+    log.Fatal(err)
+  }
+
+  rand.Seed(time.Now().UnixNano())
+
+  httpRouter := api.NewRouter(nil, nil, nil)
+  // Return http.handler and log requests to Stdout
+  loggedRouter := handlers.LoggingHandler(os.Stdout, httpRouter)
+  log.Println("Starting Integrated Cloud Native API")
+
+  // Create custom http server
+  httpServer := &http.Server{
+    Handler: loggedRouter,
+    // To Do - Implement config
+    Addr:    ":" + config.GetConfiguration().ServicePort,
+  }
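+
+  // On SIGINT, shut the server down gracefully and signal completion by closing connectionsClose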
+  connectionsClose := make(chan struct{})
+  go func() {
+    c := make(chan os.Signal, 1) // create channel c to receive signal notifications
+    signal.Notify(c, os.Interrupt) // relay SIGINT to channel c
+    <-c
+    httpServer.Shutdown(context.Background())
+    close(connectionsClose)
+  }()
+
+  // Start server and wait for a graceful shutdown
+  if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed {
+    log.Fatal(err)
+  }
+  <-connectionsClose
+}
diff --git a/cmd/bpa-restapi-agent/sample.json b/cmd/bpa-restapi-agent/sample.json
new file mode 100644 (file)
index 0000000..97c2125
--- /dev/null
@@ -0,0 +1,18 @@
+{
+  "owner":  "alpha",
+  "cluster_name": "beta",
+  "type": "container",
+  "image_name": "asdf246",
+  "image_length": 21579557,
+  "image_offset": 0,
+  "upload_complete":  false,
+  "description": {
+    "image_records":  [
+      {
+        "image_record_name": "iuysdi1234",
+        "repo": "java",
+        "tag":  "8"
+      }
+    ]
+  }
+}
diff --git a/deploy/kud-plugin-addons/rook/README.md b/deploy/kud-plugin-addons/rook/README.md
index e69de29..1ce151b 100644 (file)
@@ -0,0 +1,147 @@
+## Intel Rook infrastructure for Ceph cluster deployment
+
+By default, OSDs are created on the folder /var/lib/rook/storage-dir, and Ceph
+cluster information is stored in /var/lib/rook.
+
+# Precondition
+
+1. Compute node disk space: 20GB+ free disk space.
+
+2. Kubernetes version: Ceph CSI v1.0 requires Kubernetes >= 1.13.
+The following upgrade patch applies to the KUD repository: https://github.com/onap/multicloud-k8s
+
+```
+$ git diff
+diff --git a/kud/deployment_infra/playbooks/kud-vars.yml b/kud/deployment_infra/playbooks/kud-vars.yml
+index 9b36547..5c29fa4 100644
+--- a/kud/deployment_infra/playbooks/kud-vars.yml
++++ b/kud/deployment_infra/playbooks/kud-vars.yml
+@@ -58,7 +58,7 @@ ovn4nfv_version: adc7b2d430c44aa4137ac7f9420e14cfce3fa354
+ ovn4nfv_url: "https://git.opnfv.org/ovn4nfv-k8s-plugin/"
+
+ go_version: '1.12.5'
+-kubespray_version: 2.8.2
+-helm_client_version: 2.9.1
++kubespray_version: 2.9.0
++helm_client_version: 2.13.1
+ # kud playbooks not compatible with 2.8.0 - see MULTICLOUD-634
+ ansible_version: 2.7.10
+diff --git a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+index 9966ba8..cacb4b3 100644
+--- a/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
++++ b/kud/hosting_providers/vagrant/inventory/group_vars/k8s-cluster.yml
+@@ -48,7 +48,7 @@ local_volumes_enabled: true
+ local_volume_provisioner_enabled: true
+
+ ## Change this to use another Kubernetes version, e.g. a current beta release
+-kube_version: v1.12.3
++kube_version: v1.13.5
+
+ # Helm deployment
+ helm_enabled: true
+```
+
+After the upgrade, the Kubernetes version is as follows:
+```
+$ kubectl version
+Client Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+Server Version: version.Info{Major:"1", Minor:"13", GitVersion:"v1.13.5", GitCommit:"2166946f41b36dea2c4626f90a77706f426cdea2", GitTreeState:"clean", BuildDate:"2019-03-25T15:19:22Z", GoVersion:"go1.11.5", Compiler:"gc", Platform:"linux/amd64"}
+```
+
+If the kubectl server version is still wrong, you can upgrade it manually with:
+```console
+$ kubeadm upgrade apply v1.13.5
+```
+
+# Deployment
+
+To bring up the Rook operator (v1.0) and the Ceph cluster (Mimic 13.2.2), run:
+
+```console
+cd yaml
+./install.sh
+```
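+
+After the script finishes, you can verify (for example) that the Rook operator and
+Ceph cluster pods come up in the rook-ceph namespace:
+
+```console
+kubectl get pods -n rook-ceph
+```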
+
+# Test
+
+If you want to test the Ceph sample workload, follow these steps:
+
+1. Bring up Rook operator and Ceph cluster.
+2. Create the storage class.
+
+```console
+kubectl create -f ./test/rbd/storageclass.yaml
+```
+
+3. Create RBD secret.
+```console
+kubectl exec -ti -n rook-ceph rook-ceph-operator-948f8f84c-749zb -- bash -c \
+"ceph -c /var/lib/rook/rook-ceph/rook-ceph.config auth get-or-create-key client.kube mon \"allow profile rbd\" osd \"profile rbd pool=rbd\""
+```
+   Replace the pod name with your own rook-ceph-operator pod, see: kubectl get pod -n rook-ceph
+   Then get the admin and client user keys by running the following inside the operator pod:
+```console
+ceph auth get-key client.admin|base64
+ceph auth get-key client.kube|base64
+```
+  Then fill the keys into secret.yaml
+```console
+kubectl create -f ./test/rbd/secret.yaml
+```
+4. Create RBD Persistent Volume Claim
+```console
+kubectl create -f ./test/rbd/pvc.yaml
+```
+5. Create RBD demo pod
+```console
+kubectl create -f ./test/rbd/pod.yaml
+```
+6. Check the volumes created and the application mount status
+```console
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl get pvc
+NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc   Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        84m
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl get pod
+NAME              READY   STATUS    RESTARTS   AGE
+csirbd-demo-pod   1/1     Running   0          84m
+tingjie@ceph4:~/bohemian/workspace/rook/Documentation$ kubectl exec -ti csirbd-demo-pod -- bash
+root@csirbd-demo-pod:/# df -h
+Filesystem      Size  Used Avail Use% Mounted on
+overlay         733G   35G  662G   5% /
+tmpfs            64M     0   64M   0% /dev
+tmpfs            32G     0   32G   0% /sys/fs/cgroup
+/dev/sda2       733G   35G  662G   5% /etc/hosts
+shm              64M     0   64M   0% /dev/shm
+/dev/rbd0       976M  2.6M  958M   1% /var/lib/www/html
+tmpfs            32G   12K   32G   1% /run/secrets/kubernetes.io/serviceaccount
+tmpfs            32G     0   32G   0% /proc/acpi
+tmpfs            32G     0   32G   0% /proc/scsi
+tmpfs            32G     0   32G   0% /sys/firmware
+```
+7. Create RBD snapshot-class
+```console
+kubectl create -f ./test/rbd/snapshotclass.yaml
+```
+8. Create a volume snapshot and verify
+```console
+kubectl create -f ./test/rbd/snapshot.yaml
+
+$ kubectl get volumesnapshotclass
+NAME                      AGE
+csi-rbdplugin-snapclass   51s
+$ kubectl get volumesnapshot
+NAME               AGE
+rbd-pvc-snapshot   33s
+
+```
+9. Restore the snapshot to a new PVC and verify
+```console
+kubectl create -f ./test/rbd/pvc-restore.yaml
+
+$ kubectl get pvc
+NAME              STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+rbd-pvc           Bound    pvc-98f50bec-8a4f-434d-8def-7b69b628d427   1Gi        RWO            csi-rbd        42h
+rbd-pvc-restore   Bound    pvc-530a4939-e4c0-428d-a072-c9c39d110d7a   1Gi        RWO            csi-rbd        5s
+```
+
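+10. (Optional) Clean up the sample workload. The objects created in the previous
+steps can be removed in reverse order, for example:
+```console
+kubectl delete -f ./test/rbd/pvc-restore.yaml
+kubectl delete -f ./test/rbd/snapshot.yaml
+kubectl delete -f ./test/rbd/snapshotclass.yaml
+kubectl delete -f ./test/rbd/pod.yaml
+kubectl delete -f ./test/rbd/pvc.yaml
+kubectl delete -f ./test/rbd/secret.yaml
+kubectl delete -f ./test/rbd/storageclass.yaml
+```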
diff --git a/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh b/deploy/kud-plugin-addons/rook/yaml/collect_rook_yaml.sh
new file mode 100755 (executable)
index 0000000..1499c3f
--- /dev/null
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# usage: collect_rook_yaml.sh [target]
+
+set -ex
+
+if [ $# -ne 1 ] ; then
+    echo "Please input the target folder!"
+    exit 1
+fi
+
+VER="0.1"
+MKDIR_P="mkdir -p"
+target=$1
+temp=rook_yaml
+
+# copy to target
+$MKDIR_P $temp
+cp rook-common.yaml $temp/
+cp rook-operator-with-csi.yaml $temp/
+cp rook-ceph-cluster.yaml $temp/
+cp rook-toolbox.yaml $temp/
+cp -rf ./csi/ $temp/
+cp -rf ./test/ $temp/
+cp install.sh $temp/
+
+if [ ! -d $target/yaml ]; then
+    $MKDIR_P $target/yaml;
+fi;
+
+tar czvf $target/yaml/rook_yaml-$VER.tar.gz $temp/
+
+# clear
+rm -rf $temp
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..5fb0bb1
--- /dev/null
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-plugin-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/cephfs/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..fdcc18b
--- /dev/null
@@ -0,0 +1,55 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-cephfs-provisioner-sa
+  namespace: rook-ceph
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-cephfs-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cephfs-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-cephfs-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: cephfs-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-nodeplugin-rbac.yaml
new file mode 100644 (file)
index 0000000..d37d0cc
--- /dev/null
@@ -0,0 +1,53 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-plugin-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-csi-nodeplugin: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "update"]
+  - apiGroups: [""]
+    resources: ["namespaces"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-nodeplugin
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-plugin-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-csi-nodeplugin
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml b/deploy/kud-plugin-addons/rook/yaml/csi/rbac/rbd/csi-provisioner-rbac.yaml
new file mode 100644 (file)
index 0000000..028d7bd
--- /dev/null
@@ -0,0 +1,83 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-csi-rbd-provisioner-sa
+  namespace: rook-ceph
+
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-external-provisioner-runner-rules
+  labels:
+    rbac.ceph.rook.io/aggregate-to-rbd-external-provisioner-runner: "true"
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    verbs: ["get", "list"]
+  - apiGroups: [""]
+    resources: ["persistentvolumes"]
+    verbs: ["get", "list", "watch", "create", "delete", "update"]
+  - apiGroups: [""]
+    resources: ["persistentvolumeclaims"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["volumeattachments"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["list", "watch", "create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["get", "create", "update"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshots"]
+    verbs: ["get", "list", "watch", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "list", "create", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotcontents"]
+    verbs: ["create", "get", "list", "watch", "update", "delete"]
+  - apiGroups: ["snapshot.storage.k8s.io"]
+    resources: ["volumesnapshotclasses"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apiextensions.k8s.io"]
+    resources: ["customresourcedefinitions"]
+    verbs: ["create"]
+
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: rbd-csi-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: rook-csi-rbd-provisioner-sa
+    namespace: rook-ceph
+roleRef:
+  kind: ClusterRole
+  name: rbd-external-provisioner-runner
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/kud-plugin-addons/rook/yaml/install.sh b/deploy/kud-plugin-addons/rook/yaml/install.sh
new file mode 100755 (executable)
index 0000000..00b9e04
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+# Make sure kubernetes server is up with network dns
+# kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
+
+# Remove taint if have
+# kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule-
+
+# Remove remaining config files from the last deployment
+echo ""|sudo -S rm -rf /var/lib/rook/*
+
+# Create common CRD objects
+kubectl create -f rook-common.yaml
+
+# Create RBAC. Since the rook operator is not permitted to create RBAC rules,
+# these rules have to be created outside of the operator
+kubectl apply -f ./csi/rbac/rbd/
+kubectl apply -f ./csi/rbac/cephfs/
+
+# Start rook ceph operator with csi support
+kubectl create -f rook-operator-with-csi.yaml
+
+# Bring up the cluster with the default configuration (current Ceph version:
+# ceph/ceph:v14.2.1-20190430) and create an OSD on the default /dev/sdb of each node
+kubectl create -f rook-ceph-cluster.yaml
+
+# Start the toolbox container with CLI support. To enter the bash env, use:
+# kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o jsonpath='{.items[0].metadata.name}') bash
+kubectl create -f rook-toolbox.yaml
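+
+# Optional sanity check (example only, not part of the install flow): confirm the
+# operator, cluster and toolbox pods come up before using the storage classes, e.g.
+# kubectl -n rook-ceph get pods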
+
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-ceph-cluster.yaml
new file mode 100644 (file)
index 0000000..0e1ffba
--- /dev/null
@@ -0,0 +1,125 @@
+#################################################################################################################
+# Define the settings for the rook-ceph cluster with common settings for a production cluster.
+# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
+# in this example. See the documentation for more details on storage settings available.
+#################################################################################################################
+
+apiVersion: ceph.rook.io/v1
+kind: CephCluster
+metadata:
+  name: rook-ceph
+  namespace: rook-ceph
+spec:
+  cephVersion:
+    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
+    # v12 is luminous, v13 is mimic, and v14 is nautilus.
+    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
+    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
+    image: ceph/ceph:v13.2.2-20190410
+    # Whether to allow unsupported versions of Ceph. Currently luminous, mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
+    # Do not set to true in production.
+    allowUnsupported: false
+  # The path on the host where configuration files will be persisted. Must be specified.
+  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
+  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
+  dataDirHostPath: /var/lib/rook
+  # set the amount of mons to be started
+  mon:
+    count: 3
+    allowMultiplePerNode: true
+  # enable the ceph dashboard for viewing cluster status
+  dashboard:
+    enabled: true
+    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
+    # urlPrefix: /ceph-dashboard
+    # serve the dashboard at the given port.
+    # port: 8443
+    # serve the dashboard using SSL
+    # ssl: true
+  network:
+    # toggle to use hostNetwork
+    hostNetwork: false
+  rbdMirroring:
+    # The number of daemons that will perform the rbd mirroring.
+    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
+    workers: 0
+  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
+  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
+  # tolerate taints with a key of 'storage-node'.
+#  placement:
+#    all:
+#      nodeAffinity:
+#        requiredDuringSchedulingIgnoredDuringExecution:
+#          nodeSelectorTerms:
+#          - matchExpressions:
+#            - key: role
+#              operator: In
+#              values:
+#              - storage-node
+#      podAffinity:
+#      podAntiAffinity:
+#      tolerations:
+#      - key: storage-node
+#        operator: Exists
+# The above placement information can also be specified for mon, osd, and mgr components
+#    mon:
+#    osd:
+#    mgr:
+  annotations:
+#    all:
+#    mon:
+#    osd:
+# If no mgr annotations are set, prometheus scrape annotations will be set by default.
+#   mgr:
+  resources:
+# The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
+#    mgr:
+#      limits:
+#        cpu: "500m"
+#        memory: "1024Mi"
+#      requests:
+#        cpu: "500m"
+#        memory: "1024Mi"
+# The above example requests/limits can also be added to the mon and osd components
+#    mon:
+#    osd:
+  storage: # cluster level storage configuration and selection
+    useAllNodes: true
+    useAllDevices: false
+    deviceFilter:
+    location:
+    config:
+      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
+      # Set the storeType explicitly only if it is required not to use the default.
+      # storeType: bluestore
+      metadataDevice: # "md0" specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
+      databaseSizeMB: "10240" # uncomment if the disks are smaller than 100 GB
+      journalSizeMB: "10240"  # uncomment if the disks are 20 GB or smaller
+      # osdsPerDevice: "1" # this value can be overridden at the node or device level
+      # encryptedDevice: "true" # the default value for this option is "false"
+# Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
+    directories:
+    - path: "/var/lib/rook/storage-dir"
+# Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
+# nodes below will be used as storage resources.  Each node's 'name' field should match its 'kubernetes.io/hostname' label.
+#    nodes:
+#    - name: "172.17.4.101"
+#      directories: # specific directories to use for storage can be specified for each node
+#      - path: "/rook/storage-dir"
+#      resources:
+#        limits:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#        requests:
+#          cpu: "500m"
+#          memory: "1024Mi"
+#    - name: "172.17.4.201"
+#      devices: # specific devices to use for storage can be specified for each node
+#      - name: "sdb"
+#      - name: "nvme01" # multiple osds can be created on high performance devices
+#        config:
+#          osdsPerDevice: "5"
+#      config: # configuration can be specified at the node level which overrides the cluster level config
+#        storeType: filestore
+#    - name: "172.17.4.301"
+#      deviceFilter: "^sd."
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-common.yaml
new file mode 100644 (file)
index 0000000..e6366a0
--- /dev/null
@@ -0,0 +1,618 @@
+###################################################################################################################
+# Create the common resources that are necessary to start the operator and the ceph cluster.
+# These resources *must* be created before the operator.yaml and cluster.yaml or their variants.
+# The samples all assume that a single operator will manage a single cluster crd in the same "rook-ceph" namespace.
+#
+# If the operator needs to manage multiple clusters (in different namespaces), see the section below
+# for "cluster-specific resources". The resources below that section will need to be created for each namespace
+# where the operator needs to manage the cluster. The resources above that section do not need to be created again.
+###################################################################################################################
+
+# Namespace where the operator and other rook resources are created
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: rook-ceph
+---
+# The CRD declarations
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephclusters.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephCluster
+    listKind: CephClusterList
+    plural: cephclusters
+    singular: cephcluster
+  scope: Namespaced
+  version: v1
+  validation:
+    openAPIV3Schema:
+      properties:
+        spec:
+          properties:
+            cephVersion:
+              properties:
+                allowUnsupported:
+                  type: boolean
+                image:
+                  type: string
+                name:
+                  pattern: ^(luminous|mimic|nautilus)$
+                  type: string
+            dashboard:
+              properties:
+                enabled:
+                  type: boolean
+                urlPrefix:
+                  type: string
+                port:
+                  type: integer
+            dataDirHostPath:
+              pattern: ^/(\S+)
+              type: string
+            mon:
+              properties:
+                allowMultiplePerNode:
+                  type: boolean
+                count:
+                  maximum: 9
+                  minimum: 1
+                  type: integer
+                preferredCount:
+                  maximum: 9
+                  minimum: 0
+                  type: integer
+              required:
+              - count
+            network:
+              properties:
+                hostNetwork:
+                  type: boolean
+            storage:
+              properties:
+                nodes:
+                  items: {}
+                  type: array
+                useAllDevices: {}
+                useAllNodes:
+                  type: boolean
+          required:
+          - mon
+  additionalPrinterColumns:
+    - name: DataDirHostPath
+      type: string
+      description: Directory used on the K8s nodes
+      JSONPath: .spec.dataDirHostPath
+    - name: MonCount
+      type: string
+      description: Number of MONs
+      JSONPath: .spec.mon.count
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+    - name: State
+      type: string
+      description: Current State
+      JSONPath: .status.state
+    - name: Health
+      type: string
+      description: Ceph Health
+      JSONPath: .status.ceph.health
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephfilesystems.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephFilesystem
+    listKind: CephFilesystemList
+    plural: cephfilesystems
+    singular: cephfilesystem
+  scope: Namespaced
+  version: v1
+  additionalPrinterColumns:
+    - name: MdsCount
+      type: string
+      description: Number of MDSs
+      JSONPath: .spec.metadataServer.activeCount
+    - name: Age
+      type: date
+      JSONPath: .metadata.creationTimestamp
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephnfses.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephNFS
+    listKind: CephNFSList
+    plural: cephnfses
+    singular: cephnfs
+    shortNames:
+    - nfs
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstores.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStore
+    listKind: CephObjectStoreList
+    plural: cephobjectstores
+    singular: cephobjectstore
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephobjectstoreusers.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephObjectStoreUser
+    listKind: CephObjectStoreUserList
+    plural: cephobjectstoreusers
+    singular: cephobjectstoreuser
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: cephblockpools.ceph.rook.io
+spec:
+  group: ceph.rook.io
+  names:
+    kind: CephBlockPool
+    listKind: CephBlockPoolList
+    plural: cephblockpools
+    singular: cephblockpool
+  scope: Namespaced
+  version: v1
+---
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: volumes.rook.io
+spec:
+  group: rook.io
+  names:
+    kind: Volume
+    listKind: VolumeList
+    plural: volumes
+    singular: volume
+    shortNames:
+    - rv
+  scope: Namespaced
+  version: v1alpha2
+---
+# The cluster role for managing all the cluster-specific resources in a namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-cluster-mgmt-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-cluster-mgmt: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  - pods
+  - pods/log
+  - services
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - deployments
+  - daemonsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The role for the operator to manage resources in its own namespace
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - configmaps
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - apps
+  resources:
+  - daemonsets
+  - statefulsets
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+---
+# The cluster role for managing the Rook CRDs
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules: []
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: rook-ceph-global-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-global: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  # Pod access is needed for fencing
+  - pods
+  # Node access is needed for determining nodes where mons should run
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - events
+    # PVs and PVCs are managed by the Rook provisioner
+  - persistentvolumes
+  - persistentvolumeclaims
+  - endpoints
+  verbs:
+  - get
+  - list
+  - watch
+  - patch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+- apiGroups:
+  - rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+# Aspects of ceph-mgr that require cluster-wide access
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+  labels:
+    operator: rook
+    storage-backend: ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster-rules
+  labels:
+    operator: rook
+    storage-backend: ceph
+    rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  - nodes
+  - nodes/proxy
+  verbs:
+  - get
+  - list
+  - watch
+---
+# The rook system service account used by the operator, agent, and discovery pods
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+---
+# Grant the operator, agent, and discovery agents access to resources in the namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-system
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-global
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-global
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+#################################################################################################################
+# Beginning of cluster-specific resources. The example will assume the cluster will be created in the "rook-ceph"
+# namespace. If you want to create the cluster in a different namespace, you will need to modify these roles
+# and bindings accordingly.
+#################################################################################################################
+# Service account for the Ceph OSDs. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Service account for the Ceph Mgr. Must exist and cannot be renamed.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+rules:
+- apiGroups: [""]
+  resources: ["configmaps"]
+  verbs: [ "get", "list", "watch", "create", "update", "delete" ]
+---
+# Aspects of ceph-mgr that require access to the system namespace
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+aggregationRule:
+  clusterRoleSelectors:
+  - matchLabels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules: []
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system-rules
+  namespace: rook-ceph
+  labels:
+      rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-system: "true"
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+---
+# Aspects of ceph-mgr that operate within the cluster's namespace
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  - services
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - delete
+- apiGroups:
+  - ceph.rook.io
+  resources:
+  - "*"
+  verbs:
+  - "*"
+---
+  # Allow the operator to create resources in this cluster's namespace
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-cluster-mgmt
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-cluster-mgmt
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-system
+  namespace: rook-ceph
+---
+# Allow the osd pods in this namespace to work with configmaps
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-osd
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-osd
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-osd
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the cluster-specific resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: rook-ceph-mgr
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access the rook system resources necessary for the mgr modules
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-system
+  namespace: rook-ceph
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-system
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
+# Allow the ceph mgr to access cluster-wide resources necessary for the mgr modules
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: rook-ceph-mgr-cluster
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: rook-ceph-mgr-cluster
+subjects:
+- kind: ServiceAccount
+  name: rook-ceph-mgr
+  namespace: rook-ceph
+---
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-operator-with-csi.yaml
new file mode 100644 (file)
index 0000000..c34b879
--- /dev/null
@@ -0,0 +1,73 @@
+#################################################################################################################
+# The deployment for the rook operator that enables the ceph-csi driver for beta testing.
+# The cluster itself is created separately, e.g. with: kubectl create -f rook-ceph-cluster.yaml
+#################################################################################################################
+
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-operator
+  namespace: rook-ceph
+  labels:
+    operator: rook
+    storage-backend: ceph
+spec:
+  selector:
+    matchLabels:
+      app: rook-ceph-operator
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-operator
+    spec:
+      serviceAccountName: rook-ceph-system
+      containers:
+      - name: rook-ceph-operator
+        image: rook/ceph:v1.0.4
+        args: ["ceph", "operator"]
+        volumeMounts:
+        - mountPath: /var/lib/rook
+          name: rook-config
+        - mountPath: /etc/ceph
+          name: default-config-dir
+        env:
+        - name: ROOK_CURRENT_NAMESPACE_ONLY
+          value: "true"
+        # CSI enablement
+        - name: ROOK_CSI_ENABLE_CEPHFS
+          value: "true"
+        - name: ROOK_CSI_CEPHFS_IMAGE
+          value: "quay.io/cephcsi/cephfsplugin:v1.0.0"
+        - name: ROOK_CSI_ENABLE_RBD
+          value: "true"
+        - name: ROOK_CSI_RBD_IMAGE
+          value: "quay.io/cephcsi/rbdplugin:v1.0.0"
+        - name: ROOK_CSI_REGISTRAR_IMAGE
+          value: "quay.io/k8scsi/csi-node-driver-registrar:v1.0.2"
+        - name: ROOK_CSI_PROVISIONER_IMAGE
+          value: "quay.io/k8scsi/csi-provisioner:v1.0.1"
+        - name: ROOK_CSI_SNAPSHOTTER_IMAGE
+          value: "quay.io/k8scsi/csi-snapshotter:v1.0.1"
+        - name: ROOK_CSI_ATTACHER_IMAGE
+          value: "quay.io/k8scsi/csi-attacher:v1.0.1"
+        # The name of the node to pass with the downward API
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        # The pod name to pass with the downward API
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        # The pod namespace to pass with the downward API
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+      volumes:
+      - name: rook-config
+        emptyDir: {}
+      - name: default-config-dir
+        emptyDir: {}
diff --git a/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml b/deploy/kud-plugin-addons/rook/yaml/rook-toolbox.yaml
new file mode 100644 (file)
index 0000000..de442f0
--- /dev/null
@@ -0,0 +1,59 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: rook-ceph-tools
+  namespace: rook-ceph
+  labels:
+    app: rook-ceph-tools
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: rook-ceph-tools
+  template:
+    metadata:
+      labels:
+        app: rook-ceph-tools
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: rook-ceph-tools
+        image: rook/ceph:v1.0.4
+        command: ["/tini"]
+        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
+        imagePullPolicy: IfNotPresent
+        env:
+          - name: ROOK_ADMIN_SECRET
+            valueFrom:
+              secretKeyRef:
+                name: rook-ceph-mon
+                key: admin-secret
+        securityContext:
+          privileged: true
+        volumeMounts:
+          - mountPath: /dev
+            name: dev
+          - mountPath: /sys/bus
+            name: sysbus
+          - mountPath: /lib/modules
+            name: libmodules
+          - name: mon-endpoint-volume
+            mountPath: /etc/rook
+      # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021
+      hostNetwork: true
+      volumes:
+        - name: dev
+          hostPath:
+            path: /dev
+        - name: sysbus
+          hostPath:
+            path: /sys/bus
+        - name: libmodules
+          hostPath:
+            path: /lib/modules
+        - name: mon-endpoint-volume
+          configMap:
+            name: rook-ceph-mon-endpoints
+            items:
+            - key: data
+              path: mon-endpoints
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pod.yaml
new file mode 100644 (file)
index 0000000..3a75fb9
--- /dev/null
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: csirbd-demo-pod
+spec:
+  containers:
+   - name: web-server
+     image: nginx
+     volumeMounts:
+       - name: mypvc
+         mountPath: /var/lib/www/html
+  volumes:
+   - name: mypvc
+     persistentVolumeClaim:
+       claimName: rbd-pvc
+       readOnly: false
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc-restore.yaml
new file mode 100644 (file)
index 0000000..1fc02d5
--- /dev/null
@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc-restore
+spec:
+  storageClassName: csi-rbd
+  dataSource:
+    name: rbd-pvc-snapshot
+    kind: VolumeSnapshot
+    apiGroup: snapshot.storage.k8s.io
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/pvc.yaml
new file mode 100644 (file)
index 0000000..3115642
--- /dev/null
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: rbd-pvc
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
+  storageClassName: csi-rbd
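+# Example check (assumes the csi-rbd StorageClass from storageclass.yaml is installed):
+# kubectl get pvc rbd-pvc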
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/secret.yaml
new file mode 100644 (file)
index 0000000..89c8fe5
--- /dev/null
@@ -0,0 +1,14 @@
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: csi-rbd-secret
+  namespace: default
+data:
+  # Key value corresponds to a user name defined in Ceph cluster
+  admin: "QVFCQzExQmRuTVp0RVJBQW9FWDJmQ1RkTFQ1QWZ4SlU0OHFLc3c9PQ=="
+  # Key value corresponds to a user name defined in Ceph cluster
+  kube: "QVFBOHJGTmRzeDluQ3hBQW1zRXJkT3gybWYyTTQxTzVidG9ONlE9PQ=="
+  # if monValueFromSecret is set to "monitors", uncomment the
+  # following and set the mon there
+  #monitors: BASE64-ENCODED-Comma-Delimited-Mons
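+  # One possible way to generate these values from the rook toolbox
+  # (assumes the "admin" and "kube" Ceph users exist):
+  # ceph auth get-key client.admin | base64
+  # ceph auth get-key client.kube | base64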
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshot.yaml
new file mode 100644 (file)
index 0000000..f8ba153
--- /dev/null
@@ -0,0 +1,10 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshot
+metadata:
+  name: rbd-pvc-snapshot
+spec:
+  snapshotClassName: csi-rbdplugin-snapclass
+  source:
+    name: rbd-pvc
+    kind: PersistentVolumeClaim
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/snapshotclass.yaml
new file mode 100644 (file)
index 0000000..03e52da
--- /dev/null
@@ -0,0 +1,11 @@
+---
+apiVersion: snapshot.storage.k8s.io/v1alpha1
+kind: VolumeSnapshotClass
+metadata:
+  name: csi-rbdplugin-snapclass
+snapshotter: rbd.csi.ceph.com
+parameters:
+  pool: rbd
+  monitors: 10.111.122.22:6789/0,10.104.227.175:6789/0,10.98.129.229:6789/0
+  csi.storage.k8s.io/snapshotter-secret-name: csi-rbd-secret
+  csi.storage.k8s.io/snapshotter-secret-namespace: default
diff --git a/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml b/deploy/kud-plugin-addons/rook/yaml/test/rbd/storageclass.yaml
new file mode 100644 (file)
index 0000000..ae8c30d
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: ceph.rook.io/v1
+kind: CephBlockPool
+metadata:
+  name: rbd
+  namespace: rook-ceph
+spec:
+  replicated:
+    size: 3
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+   name: csi-rbd
+provisioner: rbd.csi.ceph.com
+parameters:
+    # Comma separated list of Ceph monitors
+    # if using FQDN, make sure csi plugin's dns policy is appropriate.
+    monitors: 10.233.47.29:6789/0,10.233.23.25:6789/0,10.233.48.241:6789/0
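+    # One way to look up the current monitor endpoints (example only):
+    # kubectl -n rook-ceph get configmap rook-ceph-mon-endpoints -o jsonpath='{.data.data}'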
+
+    # if "monitors" parameter is not set, driver to get monitors from same
+    # secret as admin/user credentials. "monValueFromSecret" provides the
+    # key in the secret whose value is the mons
+    #monValueFromSecret: "monitors"
+    
+    # Ceph pool into which the RBD image shall be created
+    pool: rbd
+
+    # RBD image format. Defaults to "2".
+    imageFormat: "2"
+
+    # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
+    imageFeatures: layering
+    
+    # The secrets have to contain Ceph admin credentials.
+    csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/provisioner-secret-namespace: default
+    csi.storage.k8s.io/node-publish-secret-name: csi-rbd-secret
+    csi.storage.k8s.io/node-publish-secret-namespace: default
+
+    # Ceph users for operating RBD
+    adminid: admin
+    userid: kube
+    # uncomment the following to use rbd-nbd as mounter on supported nodes
+    #mounter: rbd-nbd
+reclaimPolicy: Delete
diff --git a/deploy/kud/kud_launch.sh b/deploy/kud/kud_launch.sh
new file mode 100755 (executable)
index 0000000..d7be23c
--- /dev/null
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+if [ ! -d $PWD/multicloud-k8s ]; then
+       git clone https://github.com/onap/multicloud-k8s.git
+fi
+
diff --git a/deploy/metal3/scripts/metal3.sh b/deploy/metal3/scripts/metal3.sh
new file mode 100755 (executable)
index 0000000..1c82260
--- /dev/null
@@ -0,0 +1,166 @@
+#!/bin/bash
+
+LIBDIR="$(dirname "$(dirname "$(dirname "$PWD")")")"
+
+eval "$(go env)"
+
+BM_OPERATOR="${BM_OPERATOR:-https://github.com/metal3-io/baremetal-operator.git}"
+
+source $LIBDIR/env/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+function get_default_inteface_ipaddress() {
+    local _ip=$1
+    local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
+    local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+    eval $_ip="'$_ipv4address'"
+}
+
+create_ssh_key() {
+       #ssh key for compute node to communicate back to bootstrap server
+       mkdir -p $BUILD_DIR/ssh_key
+       ssh-keygen -C "compute.icn.akraino.lfedge.org" -f $BUILD_DIR/ssh_key/id_rsa
+       cat $BUILD_DIR/ssh_key/id_rsa.pub >> $HOME/.ssh/authorized_keys
+}
+
+set_compute_key() {
+_SSH_LOCAL_KEY=$(cat $BUILD_DIR/ssh_key/id_rsa)
+cat << EOF
+write_files:
+- path: /opt/ssh_id_rsa
+  owner: root:root
+  permissions: '0600'
+  content: |
+    $_SSH_LOCAL_KEY
+EOF
+}
+
+provision_compute_node() {
+       IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
+       IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
+
+       if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+               go get github.com/metal3-io/baremetal-operator
+       fi
+
+       go run $GOPATH/src/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go \
+           -address "ipmi://$COMPUTE_IPMI_ADDRESS" \
+           -user "$COMPUTE_IPMI_USER" \
+           -password "$COMPUTE_IPMI_PASSWORD" \
+           "$COMPUTE_NODE_NAME" > $COMPUTE_NODE_NAME-bm-node.yaml
+
+       printf "  image:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    url: ""%s" "$IMAGE_URL" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    checksum: ""%s" "$IMAGE_CHECKSUM" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n  userData:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    name: ""%s" "$COMPUTE_NODE_NAME""-user-data" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       printf "\n    namespace: metal3\n" >> $COMPUTE_NODE_NAME-bm-node.yaml
+       kubectl apply -f $COMPUTE_NODE_NAME-bm-node.yaml -n metal3
+}
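+
+# For reference, the image/userData snippet appended above is expected to look
+# roughly like this (values depend on the environment):
+#   image:
+#     url: http://172.22.0.1/images/<BM_IMAGE>
+#     checksum: http://172.22.0.1/images/<BM_IMAGE>.md5sum
+#   userData:
+#     name: <COMPUTE_NODE_NAME>-user-data
+#     namespace: metal3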
+
+deprovision_compute_node() {
+       kubectl patch baremetalhost $COMPUTE_NODE_NAME -n metal3 --type merge \
+    -p '{"spec":{"image":{"url":"","checksum":""}}}'
+}
+
+set_compute_ssh_config() {
+get_default_inteface_ipaddress default_addr
+cat << EOF
+- path: /root/.ssh/config
+  owner: root:root
+  permissions: '0600'
+  content: |
+    Host bootstrapmachine $default_addr
+    HostName $default_addr
+    IdentityFile /opt/ssh_id_rsa
+    User $USER
+- path: /etc/apt/sources.list
+  owner: root:root
+  permissions: '0665'
+  content: |
+       deb [trusted=yes] ssh://$USER@$default_addr:$LOCAL_APT_REPO ./
+EOF
+}
+
+create_userdata() {
+       printf "#cloud-config\n" > userdata.yaml
+       if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
+               printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >> userdata.yaml
+               printf "\nchpasswd: {expire: False}\n" >> userdata.yaml
+               printf "ssh_pwauth: True\n" >> userdata.yaml
+       fi
+
+       if [ -n "$COMPUTE_NODE_FQDN" ]; then
+               printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >> userdata.yaml
+               printf "\n" >> userdata.yaml
+       fi
+       printf "disable_root: false\n" >> userdata.yaml
+       printf "ssh_authorized_keys:\n  - " >> userdata.yaml
+
+       if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
+               yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
+       fi
+
+       cat $HOME/.ssh/id_rsa.pub >> userdata.yaml
+       printf "\n" >> userdata.yaml
+}
+
+apply_userdata_credential() {
+       cat <<EOF > ./$COMPUTE_NODE_NAME-user-data.yaml
+apiVersion: v1
+data:
+  userData: $(base64 -w 0 userdata.yaml)
+kind: Secret
+metadata:
+  name: $COMPUTE_NODE_NAME-user-data
+  namespace: metal3
+type: Opaque
+EOF
+       kubectl apply -n metal3 -f $COMPUTE_NODE_NAME-user-data.yaml
+}
+
+launch_baremetal_operator() {
+       if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+        go get github.com/metal3-io/baremetal-operator
+    fi
+
+       pushd $GOPATH/src/github.com/metal3-io/baremetal-operator
+               make deploy
+       popd
+               
+}
+
+if [ "$1" == "launch" ]; then
+    launch_baremetal_operator
+    exit 0
+fi
+
+if [ "$1" == "deprovision" ]; then
+    deprovision_compute_node
+    exit 0
+fi
+
+if [ "$1" == "provision" ]; then
+    create_userdata
+       apply_userdata_credential
+       provision_compute_node
+    exit 0
+fi
+
+
+echo "Usage: metal3.sh"
+echo "launch      - Launch the metal3 operator"
+echo "provision   - provision baremetal node as specified in common.sh"
+echo "deprovision - deprovision baremetal node as specified in common.sh"
+exit 1
+
+#Following code is tested for the offline mode
+#Will be integrated for the offline mode for ICN v0.1.0 beta
+#create_ssh_key
+#create_userdata
+#set_compute_key
+#set_compute_ssh_config
diff --git a/env/01_install_package.sh b/env/01_install_package.sh
deleted file mode 100644 (file)
index 3e369c3..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/usr/bin/env bash
-set -ex
-
-
diff --git a/env/02_configure.sh b/env/02_configure.sh
deleted file mode 100644 (file)
index c1ddb47..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-set -xe
diff --git a/env/03_launch_prereq.sh b/env/03_launch_prereq.sh
deleted file mode 100644 (file)
index d2577bb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-set -xe
old mode 100644 (file)
new mode 100755 (executable)
index e69de29..0d589a5
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+#supported OS version
+UBUNTU_BIONIC=${UBUNTU_BIONIC:-Ubuntu 18.04.2 LTS}
+
+#offline mode variable
+DOWNLOAD_PATH=${DOWNLOAD_PATH:-/opt/icn/}
+LOCAL_APT_REPO=${LOCAL_APT_REPO:-$DOWNLOAD_PATH/apt}
+PIP_CACHE_DIR=${PIP_CACHE_DIR:-$DOWNLOAD_PATH/pip-cache-dir}
+BUILD_DIR=${BUILD_DIR:-$DOWNLOAD_PATH/build-dir}
+CONTAINER_IMAGES_DIR=${CONTAINER_IMAGES_DIR:-$DOWNLOAD_PATH/docker-dir}
+
+#set variables
+#Todo include over all variables here
+KUBE_VERSION=${KUBE_VERSION:-"v1.15.0"}
+POD_NETWORK_CIDR=${POD_NETWORK_CIDR:-"10.244.0.0/16"}
+PODMAN_CNI_CONFLIST=${PODMAN_CNI_CONFLIST:-"https://raw.githubusercontent.com/containers/libpod/v1.4.4/cni/87-podman-bridge.conflist"}
+
+#Bootstrap K8s cluster
+
+
+#Ironic variables
+IRONIC_IMAGE=${IRONIC_IMAGE:-"quay.io/metal3-io/ironic:master"}
+IRONIC_INSPECTOR_IMAGE=${IRONIC_INSPECTOR_IMAGE:-"quay.io/metal3-io/ironic-inspector"}
+IRONIC_BAREMETAL_IMAGE=${IRONIC_BAREMETAL_IMAGE:-"quay.io/metal3-io/baremetal-operator:master"}
+IRONIC_BAREMETAL_SOCAT_IMAGE=${IRONIC_BAREMETAL_SOCAT_IMAGE:-"alpine/socat:latest"}
+
+IRONIC_DATA_DIR=${IRONIC_DATA_DIR:-"/opt/ironic"}
+#IRONIC_PROVISIONING_INTERFACE is required to be provisioning, don't change it
+IRONIC_PROVISIONING_INTERFACE=${IRONIC_PROVISIONING_INTERFACE:-"provisioning"}
+IRONIC_IPMI_INTERFACE=${IRONIC_IPMI_INTERFACE:-"eno1"}
+IRONIC_PROVISIONING_INTERFACE_IP=${IRONIC_PROVISIONING_INTERFACE_IP:-"172.22.0.1"}
+IRONIC_IPMI_INTERFACE_IP=${IRONIC_IPMI_INTERFACE_IP:-"172.31.1.9"}
+BM_IMAGE_URL=${BM_IMAGE_URL:-"https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img"}
+BM_IMAGE=${BM_IMAGE:-"bionic-server-cloudimg-amd64.img"}
+
+#Todo change into nodes list in json pattern
+COMPUTE_NODE_NAME=${COMPUTE_NODE_NAME:-"el-100-node-01"}
+COMPUTE_IPMI_ADDRESS=${COMPUTE_IPMI_ADDRESS:-"172.31.1.17"}
+COMPUTE_IPMI_USER=${COMPUTE_IPMI_USER:-"ryeleswa"}
+COMPUTE_IPMI_PASSWORD=${COMPUTE_IPMI_PASSWORD:-"changeme1"}
+COMPUTE_NODE_FQDN=${COMPUTE_NODE_FQDN:-"node01.akraino.org"}
+#COMPUTE_NODE_HOSTNAME=${COMPUTE_NODE_HOSTNAME:-"node01"}
+COMPUTE_NODE_PASSWORD=${COMPUTE_NODE_PASSWORD:-"mypasswd"}
old mode 100644 (file)
new mode 100755 (executable)
index e69de29..40a29f8
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# Log output automatically
+# referred from metal3 project
+LOGDIR="$(dirname $0)/logs"
+if [ ! -d "$LOGDIR" ]; then
+    mkdir -p "$LOGDIR"
+fi
+LOGFILE="$LOGDIR/$(basename $0 .sh)-$(date +%F-%H%M%S).log"
+exec 1> >( tee "${LOGFILE}" ) 2>&1
diff --git a/env/metal3/01_install_package.sh b/env/metal3/01_install_package.sh
new file mode 100755 (executable)
index 0000000..3798c0f
--- /dev/null
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+set -ex
+
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/common.sh
+source $LIBDIR/lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+function install_essential_packages() {
+    apt-get update
+    apt-get -y install \
+               crudini \
+               curl \
+               dnsmasq \
+               figlet \
+               nmap \
+               patch \
+               psmisc \
+               python-pip \
+               python-requests \
+               python-setuptools \
+               vim \
+               wget \
+               git \
+               software-properties-common
+
+       add-apt-repository ppa:longsleep/golang-backports
+       apt-get update
+       apt-get -y install golang-go
+}
+
+function install_ironic_packages() {
+    apt-get update
+    apt-get -y install \
+               jq \
+               nodejs \
+               python-ironicclient \
+               python-ironic-inspector-client \
+               python-lxml \
+               python-netaddr \
+               python-openstackclient \
+               unzip \
+               genisoimage
+
+       if [ "$1" == "offline" ]; then
+               pip install --no-index \
+                       --find-links=file:$PIP_CACHE_DIR lolcat yq
+               return
+       fi
+
+    pip install \
+               lolcat \
+               yq
+}
+
+function install_docker_packages() {
+    apt-get remove docker \
+               docker-engine \
+               docker.io \
+               containerd \
+               runc
+    apt-get update
+    apt-get -y install \
+               apt-transport-https \
+               ca-certificates \
+               curl \
+               gnupg-agent \
+               software-properties-common
+       if [ "$1" != "offline" ]; then
+               curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+               add-apt-repository \
+                       "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+                       $(lsb_release -cs) \
+                       stable"
+               apt-get update
+       fi
+    apt-get -y install docker-ce=18.06.0~ce~3-0~ubuntu
+}
+
+function install_podman_packages() {
+       if [ "$1" != "offline" ]; then
+       add-apt-repository -y ppa:projectatomic/ppa
+               apt-get update
+       fi
+    apt-get -y install podman
+}
+
+function install_kubernetes_packages() {
+       if [ "$1" != "offline" ]; then
+               curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+               bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+               apt-get update
+       fi
+       apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
+       apt-mark hold kubelet kubeadm kubectl
+}
+
+install() {
+       install_essential_packages
+       install_ironic_packages $1
+       install_docker_packages $1
+       install_podman_packages $1
+       install_kubernetes_packages $1
+}
+
+if ["$1" == "-o"]; then
+       install offline
+       exit 0
+fi
+
+install
diff --git a/env/metal3/02_configure.sh b/env/metal3/02_configure.sh
new file mode 100755 (executable)
index 0000000..15864d6
--- /dev/null
@@ -0,0 +1,150 @@
+#!/usr/bin/env bash
+set -xe
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/logging.sh
+source $LIBDIR/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "configure script must be run as root"
+    exit 1
+fi
+
+function check_inteface_ip() {
+       local interface=$1
+       local ipaddr=$2
+
+    if [ ! $(ip addr show dev $interface) ]; then
+        exit 1
+    fi
+
+    local ipv4address=$(ip addr show dev $interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+    if [ "$ipv4address" != "$ipaddr" ]; then
+        exit 1
+    fi
+}
+
+function configure_kubelet() {
+       swapoff -a
+       #Todo: additional kubelet configuration
+}
+
+function configure_kubeadm() {
+       #Todo: error handling
+       if [ "$1" == "offline" ]; then
+               for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+               docker load --input $CONTAINER_IMAGES_DIR/$images.tar;
+               done
+
+               docker load --input $CONTAINER_IMAGES_DIR/pause.tar
+               docker load --input $CONTAINER_IMAGES_DIR/etcd.tar
+               docker load --input $CONTAINER_IMAGES_DIR/coredns.tar
+        return
+    fi
+       kubeadm config images pull --kubernetes-version=$KUBE_VERSION
+}
+
+function configure_ironic_interfaces() {
+       #Todo: later, change the CNI networking to podman networking
+       # Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
+       if [ "$IRONIC_PROVISIONING_INTERFACE" ]; then
+               check_inteface_ip $IRONIC_PROVISIONING_INTERFACE $IRONIC_PROVISIONING_INTERFACE_IP      
+       else
+               exit 1
+
+       fi
+
+       if [ "$IRONIC_IPMI_INTERFACE" ]; then
+        check_inteface_ip $IRONIC_IPMI_INTERFACE $IRONIC_IPMI_INTERFACE_IP
+    else
+        exit 1
+    fi
+
+       for port in 80 5050 6385 ; do
+       if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
+               sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT
+       fi
+       done
+
+       # Allow ipmi to the bmc processes
+       if ! sudo iptables -C INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
+       sudo iptables -I INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT
+       fi
+
+       #Allow access to dhcp and tftp server for pxeboot
+       for port in 67 69 ; do
+       if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT 2>/dev/null ; then
+               sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT
+       fi
+       done
+}
+
+function configure_ironic_offline() {
+       if [ ! -d $CONTAINER_IMAGES_DIR ] && [ ! -d $BUILD_DIR ]; then
+               exit 1  
+       fi
+
+       for image in ironic-inspector-image ironic-image podman-pause \
+               baremetal-operator socat; do
+               if [ ! -f "$CONTAINER_IMAGES_DIR/$image" ]; then
+                       exit 1
+               fi
+       done
+
+       if [ ! -f "$BUILD_DIR/ironic-python-agent.initramfs" ] && [ ! -f \
+               "$BUILD_DIR/ironic-python-agent.kernel" ] && [ ! -f \
+               "$BUILD_DIR/$BM_IMAGE" ]; then
+               exit 1
+       fi
+
+       podman load --input $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar
+       podman load --input $CONTAINER_IMAGES_DIR/ironic-image.tar
+       podman load --input $CONTAINER_IMAGES_DIR/podman-pause.tar
+
+       docker load --input $CONTAINER_IMAGES_DIR/baremetal-operator.tar
+       docker load --input $CONTAINER_IMAGES_DIR/socat.tar
+
+       mkdir -p "$IRONIC_DATA_DIR/html/images"
+
+       cp $BUILD_DIR/ironic-python-agent.initramfs $IRONIC_DATA_DIR/html/images/
+       cp $BUILD_DIR/ironic-python-agent.kernel $IRONIC_DATA_DIR/html/images/
+       cp $BUILD_DIR/$BM_IMAGE $IRONIC_DATA_DIR/html/images/
+       md5sum $BUILD_DIR/$BM_IMAGE | awk '{print $1}' > $BUILD_DIR/${BM_IMAGE}.md5sum
+}
+
+function configure_ironic() {
+       if [ "$1" == "offline" ]; then
+               configure_ironic_offline
+               return
+       fi
+
+       podman pull $IRONIC_IMAGE
+       podman pull $IRONIC_INSPECTOR_IMAGE
+       
+       mkdir -p "$IRONIC_DATA_DIR/html/images"
+       pushd $IRONIC_DATA_DIR/html/images
+       
+       if [ ! -f ironic-python-agent.initramfs ]; then
+               curl --insecure --compressed -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+       fi
+       
+       if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+       curl -o ${BM_IMAGE} --insecure --compressed -O -L ${BM_IMAGE_URL}
+       md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+       fi
+       popd
+}
+
+function configure() {
+       configure_kubeadm $1
+       configure_kubelet
+       configure_ironic_interfaces
+       configure_ironic $1
+}
+
+if [ "$1" == "-o" ]; then
+    configure offline
+    exit 0
+fi
+
+configure
diff --git a/env/metal3/03_launch_prereq.sh b/env/metal3/03_launch_prereq.sh
new file mode 100755 (executable)
index 0000000..95c17f0
--- /dev/null
@@ -0,0 +1,143 @@
+#!/bin/bash
+set -xe
+
+LIBDIR="$(dirname "$PWD")"
+
+source $LIBDIR/lib/logging.sh
+source $LIBDIR/lib/common.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "launch script must be run as root"
+    exit 1
+fi
+
+function get_default_inteface_ipaddress() {
+       local _ip=$1
+       local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
+       local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+       eval $_ip="'$_ipv4address'"
+}
+
+
+
+function check_cni_network() {
+       #since bootstrap cluster is a single node cluster,
+       #podman and the bootstrap cluster have the same network configuration to avoid CNI network conf conflicts
+       if [ ! -d "/etc/cni/net.d" ]; then
+               mkdir -p "/etc/cni/net.d"
+       fi
+
+       if [ ! -f "/etc/cni/net.d/87-podman-bridge.conflist" ]; then
+               if [ "$1" == "offline" ]; then
+                       cp $BUILD_DIR/87-podman-bridge.conflist /etc/cni/net.d/
+                       return
+       fi
+
+               if !(wget $PODMAN_CNI_CONFLIST -P /etc/cni/net.d/); then
+                       exit 1
+               fi
+       fi
+}
+
+function create_k8s_regular_user() {
+       if [ ! -d "$HOME/.kube" ]; then
+               mkdir -p $HOME/.kube
+       fi
+
+       if [ ! -f /etc/kubernetes/admin.conf ]; then
+               exit 1
+       fi
+
+       cp -rf /etc/kubernetes/admin.conf $HOME/.kube/config
+       chown $(id -u):$(id -g) $HOME/.kube/config
+}
+
+function check_k8s_node_status(){
+       echo 'checking bootstrap cluster single node status'
+       node_status="False"
+
+       for i in {1..5}
+               do
+                       check_node=$(kubectl get node -o \
+                                               jsonpath='{.items[0].status.conditions[?(@.reason == "KubeletReady")].status}')
+                       if [ "$check_node" != "" ]; then
+                               node_status=${check_node}
+                       fi
+
+                       if [ $node_status == "True" ]; then
+                               break
+                       fi
+
+                       sleep 3
+               done
+
+       if [ $node_status != "True" ]; then
+               echo "bootstrap cluster single node status is not ready"
+               exit 1
+       fi
+}
+
+function install_podman() {
+       # set password for mariadb
+       mariadb_password=$(echo $(date;hostname)|sha256sum |cut -c-20)
+
+       # Create pod
+       podman pod create -n ironic-pod
+
+       # Start dnsmasq, http, mariadb, and ironic containers using same image
+       podman run -d --net host --privileged --name dnsmasq  --pod ironic-pod \
+               -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/rundnsmasq ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name httpd --pod ironic-pod \
+       -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runhttpd ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name mariadb --pod ironic-pod \
+       -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runmariadb \
+       --env MARIADB_PASSWORD=$mariadb_password ${IRONIC_IMAGE}
+
+       podman run -d --net host --privileged --name ironic --pod ironic-pod \
+       --env MARIADB_PASSWORD=$mariadb_password \
+       -v $IRONIC_DATA_DIR:/shared ${IRONIC_IMAGE}
+
+       # Start Ironic Inspector
+       podman run -d --net host --privileged --name ironic-inspector \
+               --pod ironic-pod "${IRONIC_INSPECTOR_IMAGE}"
+}
+
+function remove_k8s_noschedule_taint() {
+       #Bootstrap cluster is a single node
+       nodename=$(kubectl get node -o jsonpath='{.items[0].metadata.name}')
+       if !(kubectl taint node $nodename node-role.kubernetes.io/master:NoSchedule-); then
+               exit 1
+       fi
+}
+
+function install_k8s_single_node() {
+       get_default_inteface_ipaddress apiserver_advertise_addr
+       kubeadm_init="kubeadm init --kubernetes-version=$KUBE_VERSION \
+                                       --pod-network-cidr=$POD_NETWORK_CIDR \
+                                       --apiserver-advertise-address=$apiserver_advertise_addr"
+       if ! ${kubeadm_init}; then
+               exit 1
+       fi
+}
+
+function install() {
+       # Bring up the single-node Kubernetes bootstrap cluster
+       install_k8s_single_node
+       check_cni_network $1
+       create_k8s_regular_user
+       check_k8s_node_status
+       remove_k8s_noschedule_taint
+
+       # Bring up the Ironic provisioning containers
+       #TODO: add an error handling mechanism for the podman containers
+       install_podman
+}
+
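+# "-o" runs the install in offline mode: the podman CNI conflist is taken
+# from $BUILD_DIR instead of being downloaded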
+if [ "$1" == "-o" ]; then
+    install offline
+    exit 0
+fi
+
+install
diff --git a/env/ubuntu/bootloader-env/01_bootloader_package_req.sh b/env/ubuntu/bootloader-env/01_bootloader_package_req.sh
new file mode 100755 (executable)
index 0000000..793fce1
--- /dev/null
@@ -0,0 +1,294 @@
+#!/usr/bin/env bash
+set -ex
+shopt -s extglob
+
+source $(dirname $PWD)/../lib/common.sh
+source $(dirname $PWD)/../lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+if [[ $(lsb_release -d | cut -f2) != $UBUNTU_BIONIC ]]; then
+    echo "Currently only Ubuntu 18.04.2 LTS is supported"
+    exit 1
+fi
+
+function download_essential_packages() {
+       apt-get update
+       for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+                       python-pip python-requests python-setuptools vim wget; do
+               apt-get -d install $package -y
+       done
+}
+
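+# build_baremetal_operator_images: build the baremetal-operator image from the cloned
+# source (if present) and save it, together with the socat image, under $CONTAINER_IMAGES_DIR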
+function build_baremetal_operator_images() {
+       if [ ! -d "$BUILD_DIR/baremetal-operator" ]; then
+               return
+       fi
+
+       pushd $BUILD_DIR/baremetal-operator
+       docker build -t $IRONIC_BAREMETAL_IMAGE . -f build/Dockerfile
+       docker save --output \
+               $CONTAINER_IMAGES_DIR/baremetal-operator.tar $IRONIC_BAREMETAL_IMAGE
+       popd
+
+       docker pull $IRONIC_BAREMETAL_SOCAT_IMAGE
+       docker save --output $CONTAINER_IMAGES_DIR/socat.tar $IRONIC_BAREMETAL_SOCAT_IMAGE
+}
+
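+# build_ironic_images: build the ironic and ironic-inspector images from the cloned metal3
+# repos with podman, tag them with the expected image names and save them (plus the pause
+# image) as tarballs in $CONTAINER_IMAGES_DIR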
+function build_ironic_images() {
+       for images in ironic-image ironic-inspector-image; do
+               if [ -d "$BUILD_DIR/$images" ]; then
+                       pushd $BUILD_DIR/$images
+                       podman build -t $images .
+                       popd
+               fi
+       done
+
+       if podman images -q localhost/ironic-inspector-image ; then
+               podman tag localhost/ironic-inspector-image $IRONIC_INSPECTOR_IMAGE
+               podman save --output \
+                       $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar \
+                       $IRONIC_INSPECTOR_IMAGE
+       fi
+
+       if podman images -q localhost/ironic-image ; then
+               podman tag localhost/ironic-image $IRONIC_IMAGE
+               podman save --output $CONTAINER_IMAGES_DIR/ironic-image.tar \
+                       $IRONIC_IMAGE
+       fi
+
+       podman pull k8s.gcr.io/pause:3.1
+       podman save --output $CONTAINER_IMAGES_DIR/podman-pause.tar \
+               k8s.gcr.io/pause:3.1
+
+       #build_baremetal_operator_images
+}
+
+
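+# download_container_images: pull and save the Kubernetes control-plane images with docker,
+# then build and save the Ironic images with podman; all tarballs land in $CONTAINER_IMAGES_DIR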
+function download_container_images() {
+       check_docker
+       pushd $CONTAINER_IMAGES_DIR
+       #Docker images for Kubernetes
+       for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+               docker pull k8s.gcr.io/$images:v1.15.0
+               docker save --output $images.tar k8s.gcr.io/$images
+       done
+
+       docker pull k8s.gcr.io/pause:3.1
+       docker save --output pause.tar k8s.gcr.io/pause
+
+       docker pull k8s.gcr.io/etcd:3.3.10
+       docker save --output etcd.tar k8s.gcr.io/etcd
+
+       docker pull k8s.gcr.io/coredns:1.3.1
+       docker save --output coredns.tar k8s.gcr.io/coredns
+
+       #podman images for Ironic
+       check_podman
+       build_ironic_images
+       #podman pull $IRONIC_IMAGE 
+       #podman save --output ironic.tar $IRONIC_IMAGE
+       #podman pull $IRONIC_INSPECTOR_IMAGE 
+       #podman save --output ironic-inspector.tar $IRONIC_INSPECTOR_IMAGE
+       popd
+}
+
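+# download_build_packages: fetch the ironic-python-agent ramdisk, the bare metal OS image
+# and its md5sum, the podman CNI conflist, and clone the metal3 sources pinned to known commits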
+function download_build_packages() {
+       check_curl
+       pushd $BUILD_DIR
+       if [ ! -f ironic-python-agent.initramfs ]; then
+               curl --insecure --compressed \
+                       -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+       fi
+
+       if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+               curl -o ${BM_IMAGE} --insecure --compressed -L ${BM_IMAGE_URL}
+               md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+       fi
+
+       if [ ! -f 87-podman-bridge.conflist ]; then
+               curl --insecure --compressed -O -L $PODMAN_CNI_CONFLIST
+       fi
+
+       if [ ! -d baremetal-operator ]; then
+               git clone https://github.com/metal3-io/baremetal-operator.git
+               pushd ./baremetal-operator
+               git checkout -b icn_baremetal_operator 11ea02ab5cab8b3ab14972ae7c0e70206bba00b5
+               popd
+       fi
+
+       if [ ! -d ironic-inspector-image ]; then
+               git clone https://github.com/metal3-io/ironic-inspector-image.git
+               pushd ./ironic-inspector-image
+               git checkout -b icn_ironic_inspector_image 25431bd5b7fc87c6f3cfb8b0431fe66b86bbab0e
+               popd
+       fi
+
+       if [ ! -d ironic-image ]; then
+               git clone https://github.com/metal3-io/ironic-image.git
+               pushd ./ironic-image
+               git checkout -b icn_ironic_image 329eb4542f0d8d0f0e9cf0d7e550e33b07efe7fb
+               popd
+       fi
+}
+
+function check_pip() {
+       if ! which pip ; then
+               apt-get install python-pip -y
+       fi
+}
+
+function check_curl() {
+       if ! which curl ; then
+               apt-get install curl -y
+       fi
+}
+
+function check_apt_tools() {
+       if ! which add-apt-repository ; then
+               apt-get install software-properties-common -y
+       fi
+}
+
+function download_ironic_packages() {
+       for package in jq nodejs python-ironicclient \
+                       python-ironic-inspector-client python-lxml python-netaddr \
+                       python-openstackclient unzip genisoimage; do
+               apt-get -d install $package -y
+       done
+
+       check_pip
+       pip download lolcat yq -d $PIP_CACHE_DIR
+}
+
+function check_docker() {
+       if which docker ; then
+               return
+       fi
+
+    apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+        docker-ce
+    apt-get update
+    for package in apt-transport-https ca-certificates gnupg-agent \
+            software-properties-common; do
+        apt-get -d install $package -y
+    done
+
+    check_curl
+    check_apt_tools
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get install docker-ce=18.06.0~ce~3-0~ubuntu -y
+}
+
+function check_podman() {
+       if which podman; then
+               return
+       fi
+
+       add-apt-repository -y ppa:projectatomic/ppa
+       apt-get update
+       apt-get install podman -y
+}
+
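+# download_docker_packages: remove any existing docker packages, add the Docker apt
+# repository and download (apt-get -d) docker-ce 18.06 and its prerequisites into the apt cache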
+function download_docker_packages() {
+    apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+        docker-ce
+    apt-get update
+    for package in apt-transport-https ca-certificates gnupg-agent \
+            software-properties-common; do
+        apt-get -d install $package -y
+    done
+
+    check_curl
+    check_apt_tools
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get -d install docker-ce=18.06.0~ce~3-0~ubuntu -y
+}
+
+function download_podman_packages() {
+    apt-get update
+    add-apt-repository -y ppa:projectatomic/ppa
+    apt-get -d install podman -y
+}
+
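+# download_kubernetes_packages: add the upstream Kubernetes apt repository and download
+# the kubelet/kubeadm/kubectl 1.15.0 debs into the apt cache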
+function download_kubernetes_packages() {
+   curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+   bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+   apt-get update
+   apt-get install -d kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 -y
+}
+
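+# clean_apt_cache: clear previously cached .deb files from /var/cache/apt/archives,
+# keeping only the lock file and the partial/ directory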
+function clean_apt_cache() {
+       pushd /var/cache/apt/archives
+
+       if [ $(ls -1q . | wc -l) -ge 3 ]; then
+               rm !("lock"|"partial")
+       fi
+       popd
+}
+
+function mv_apt_cache() {
+    pushd /var/cache/apt/archives
+
+    if [ $(ls -1q . | wc -l) -gt 2 ]; then
+        mv !("lock"|"partial") $LOCAL_APT_REPO
+    fi
+    popd
+}
+
+function check_dir() {
+    if [ ! -d "$1" ]; then
+        mkdir -p "$1"
+    fi
+}
+
+function clean_dir() {
+    pushd "$1"
+
+    if [ $(ls -1q . | wc -l) -ne 0 ]; then
+        rm -r ./*
+    fi
+    popd
+}
+
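+# Main flow: reset the apt cache and the local repo, pip cache, build and container image
+# directories, download every required package and container image, then move the cached
+# debs into $LOCAL_APT_REPO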
+clean_apt_cache
+check_dir $LOCAL_APT_REPO
+clean_dir $LOCAL_APT_REPO
+check_dir $PIP_CACHE_DIR
+clean_dir $PIP_CACHE_DIR
+check_dir $BUILD_DIR
+clean_dir $BUILD_DIR
+check_dir $CONTAINER_IMAGES_DIR
+clean_dir $CONTAINER_IMAGES_DIR
+download_essential_packages
+download_ironic_packages
+download_docker_packages
+download_podman_packages
+download_kubernetes_packages
+download_build_packages
+download_container_images
+mv_apt_cache
diff --git a/env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh b/env/ubuntu/bootloader-env/02_clean_bootloader_package_req.sh
new file mode 100755 (executable)
index 0000000..4154b6f
--- /dev/null
@@ -0,0 +1,144 @@
+#!/usr/bin/env bash
+set -ex
+
+source $(dirname $PWD)/../lib/common.sh
+source $(dirname $PWD)/../lib/logging.sh
+
+if [[ $EUID -ne 0 ]]; then
+    echo "This script must be run as root"
+    exit 1
+fi
+
+if [[ $(lsb_release -d | cut -f2) != $UBUNTU_BIONIC ]]; then
+    echo "Currently only Ubuntu 18.04.2 LTS is supported"
+    exit 1
+fi
+
+function clean_essential_packages() {
+       apt-get update
+       for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+                       python-pip python-requests python-setuptools vim wget; do
+               apt-get remove $package -y
+       done
+
+       apt-get autoremove -y
+       rm -rf /etc/apt/sources.list.d/*
+}
+
+function check_prerequisite() {
+    if ! which pip; then
+        apt-get install python-pip -y
+    fi
+
+    if ! which curl; then
+        apt-get install curl -y
+    fi
+
+    if ! which add-apt-repository; then
+        apt-get install software-properties-common -y
+    fi
+}
+
+function clean_ironic_packages() {
+       for package in jq nodejs python-ironicclient \
+                       python-ironic-inspector-client python-lxml python-netaddr \
+                       python-openstackclient unzip genisoimage; do
+               apt-get remove $package -y
+       done
+}
+
+function clean_docker_packages() {
+    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+    add-apt-repository \
+        "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+        $(lsb_release -cs) \
+        stable"
+    apt-get update
+    apt-get remove docker-ce -y
+    for package in apt-transport-https ca-certificates gnupg-agent \
+            software-properties-common; do
+        apt-get remove $package -y
+    done
+
+    apt-get remove -y docker \
+        docker-engine \
+        docker.io \
+        containerd \
+        runc \
+        docker-ce
+
+    apt-get update
+}
+
+function clean_podman_packages() {
+    apt-get update
+    add-apt-repository -y ppa:projectatomic/ppa
+    apt-get remove podman -y
+}
+
+function clean_kubernetes_packages() {
+       #Make sure any Kubernetes packages installed during the download step are removed
+   curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+   bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF'
+   apt-get update
+   apt-get remove kubelet kubeadm kubectl -y
+}
+
+function clean_apt_cache() {
+       shopt -s extglob
+       pushd /var/cache/apt/archives
+
+       if [ $(ls -1q . | wc -l) -ge 3 ]; then
+               rm !("lock"|"partial")
+       fi
+       popd
+}
+
+function mv_apt_cache() {
+       shopt -s extglob
+    pushd /var/cache/apt/archives
+
+    if [ $(ls -1q . | wc -l) -gt 2 ]; then
+        mv !("lock"|"partial") $LOCAL_APT_REPO
+    fi
+    popd
+}
+
+function check_dir() {
+    if [ ! -d "$1" ]; then
+        mkdir -p "$1"
+    fi
+}
+
+function clean_dir() {
+       shopt -s extglob
+    pushd "$1"
+
+    if [ $(ls -1q . | wc -l) -ne 0 ]; then
+        rm -r ./*
+    fi
+    popd
+}
+
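+# Main flow: reset the cache directories, remove every package that the download script
+# installed (Kubernetes, podman, docker, Ironic, essential packages) and delete the local
+# apt repo, pip cache, build and container image directories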
+check_prerequisite
+clean_apt_cache
+check_dir $LOCAL_APT_REPO
+clean_dir $LOCAL_APT_REPO
+check_dir $PIP_CACHE_DIR
+clean_dir $PIP_CACHE_DIR
+check_dir $BUILD_DIR
+clean_dir $BUILD_DIR
+check_dir $CONTAINER_IMAGES_DIR
+clean_dir $CONTAINER_IMAGES_DIR
+clean_kubernetes_packages
+clean_podman_packages
+clean_docker_packages
+clean_ironic_packages
+clean_essential_packages
+rm -rf $LOCAL_APT_REPO
+rm -rf $PIP_CACHE_DIR
+rm -rf $BUILD_DIR
+rm -rf $CONTAINER_IMAGES_DIR