Remove BPA from Makefile
[icn.git] cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
index e7e7c34..0ad5a47 100644
@@ -1,37 +1,53 @@
 package provisioning
 
 import (
+       "bytes"
        "context"
-        "os"
-        "fmt"
-        "bytes"
-       "regexp"
-       "strings"
+       "encoding/json"
+       "fmt"
        "io/ioutil"
-        "path/filepath"
-        "os/user"
-
+       "os"
+       "strings"
+       "time"
 
        bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
-        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-        "k8s.io/apimachinery/pkg/runtime/schema"
+       batchv1 "k8s.io/api/batch/v1"
+       corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/errors"
+       metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+       "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
-        "k8s.io/client-go/tools/clientcmd"
-        "k8s.io/client-go/dynamic"
+       "k8s.io/apimachinery/pkg/runtime/schema"
+       "k8s.io/client-go/dynamic"
+
+       "golang.org/x/crypto/ssh"
+       "gopkg.in/ini.v1"
+       "k8s.io/client-go/kubernetes"
        "sigs.k8s.io/controller-runtime/pkg/client"
+       "sigs.k8s.io/controller-runtime/pkg/client/config"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
        "sigs.k8s.io/controller-runtime/pkg/source"
-        "gopkg.in/ini.v1"
 )
 
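+// VirtletVM holds the IP address and MAC address of a running Virtlet VM pod.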
+type VirtletVM struct {
+       IPaddress  string
+       MACaddress string
+}
+
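+// NetworksStatus mirrors one entry of the k8s.v1.cni.cncf.io/networks-status pod annotation.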
+type NetworksStatus struct {
+       Name      string      `json:"name,omitempty"`
+       Interface string      `json:"interface,omitempty"`
+       Ips       []string    `json:"ips,omitempty"`
+       Mac       string      `json:"mac,omitempty"`
+       Default   bool        `json:"default,omitempty"`
+       Dns       interface{} `json:"dns,omitempty"`
+}
+
 var log = logf.Log.WithName("controller_provisioning")
-var dhcpLeaseFile = "/var/lib/dhcp/dhcpd.leases"
 
 /**
 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
@@ -46,7 +62,23 @@ func Add(mgr manager.Manager) error {
 
 // newReconciler returns a new reconcile.Reconciler
 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
-       return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+
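+       // Build the kube config, a typed clientset, and a dynamic client used for BareMetalHost CRs.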
+       config, err := config.GetConfig()
+       if err != nil {
+               fmt.Printf("Could not get kube config, Error: %v\n", err)
+       }
+
+       clientSet, err := kubernetes.NewForConfig(config)
+       if err != nil {
+               fmt.Printf("Could not create clientset, Error: %v\n", err)
+       }
+       bmhDynamicClient, err := dynamic.NewForConfig(config)
+
+       if err != nil {
+               fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
+       }
+
+       return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
@@ -63,6 +95,31 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
                return err
        }
 
+       // Watch for changes to the ConfigMap resources created as a consequence of the provisioning CR
+       err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
+               IsController: true,
+               OwnerType:    &bpav1alpha1.Provisioning{},
+       })
+
+       if err != nil {
+               return err
+       }
+
+       //Watch for changes to the Job resources also created as a consequence of the provisioning CR
+       err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
+               IsController: true,
+               OwnerType:    &bpav1alpha1.Provisioning{},
+       })
+
+       if err != nil {
+               return err
+       }
+
+       // Watch for changes to the Software CR
+       err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
+       if err != nil {
+               return err
+       }
 
        return nil
 }
@@ -74,8 +131,10 @@ var _ reconcile.Reconciler = &ReconcileProvisioning{}
 type ReconcileProvisioning struct {
        // This client, initialized using mgr.Client() above, is a split client
        // that reads objects from the cache and writes to the apiserver
-       client client.Client
-       scheme *runtime.Scheme
+       client    client.Client
+       scheme    *runtime.Scheme
+       clientset kubernetes.Interface
+       bmhClient dynamic.Interface
 }
 
 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
@@ -87,237 +146,719 @@ type ReconcileProvisioning struct {
 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
        reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
-       reqLogger.Info("Reconciling Provisioning")
+       fmt.Printf("\n\n")
+       reqLogger.Info("Reconciling Custom Resource")
 
        // Fetch the Provisioning instance
        provisioningInstance := &bpav1alpha1.Provisioning{}
+       softwareInstance := &bpav1alpha1.Software{}
        err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
+       provisioningCreated := true
        if err != nil {
-               if errors.IsNotFound(err) {
-                       // Request object not found, could have been deleted after reconcile request.
-                       // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
-                       // Return and don't requeue
-                       return reconcile.Result{}, nil
+
+               //Check if it is a Software instance
+               err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
+               if err != nil {
+                       if errors.IsNotFound(err) {
+                               // Request object not found, could have been deleted after reconcile request.
+                               // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
+                               // Return and don't requeue
+                               return reconcile.Result{}, nil
+                       }
+
+                       // Error reading the object - requeue the request.
+                       return reconcile.Result{}, err
+               }
+
+               //No error occurred, so a Software CR was created, not a Provisioning CR
+               provisioningCreated = false
+       }
+
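+       //Prefixes used to tag master and worker entries in the cluster IP address configmap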
+       masterTag := "MASTER_"
+       workerTag := "WORKER_"
+
+       if provisioningCreated {
+
+               ///////////////////////////////////////////////////////////////////////////////////////////////
+               ////////////////         Provisioning CR was created so install KUD          /////////////////
+               //////////////////////////////////////////////////////////////////////////////////////////////
+               clusterName := provisioningInstance.Labels["cluster"]
+               clusterType := provisioningInstance.Labels["cluster-type"]
+               mastersList := provisioningInstance.Spec.Masters
+               workersList := provisioningInstance.Spec.Workers
+               kudPlugins := provisioningInstance.Spec.KUDPlugins
+               podSubnet := provisioningInstance.Spec.PodSubnet
+
+               bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
+               virtletVMList, _ := listVirtletVMs(r.clientset)
+
+               var allString string
+               var masterString string
+               var workerString string
+
+               multiClusterDir := "/multi-cluster"
+
+               //Create Directory for the specific cluster
+               clusterDir := multiClusterDir + "/" + clusterName
+               os.MkdirAll(clusterDir, os.ModePerm)
+
+               //Create maps for the cluster label and for the host-label-to-IP-address configmap data
+               clusterLabel := make(map[string]string)
+               clusterLabel["cluster"] = clusterName
+               clusterData := make(map[string]string)
+
+               //Iterate through mastersList and get all the mac addresses and IP addresses
+               for _, masterMap := range mastersList {
+
+                       for masterLabel, master := range masterMap {
+                               masterMAC := master.MACaddress
+                               hostIPaddress := ""
+
+                               if masterMAC == "" {
+                                       err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
+                                       return reconcile.Result{}, err
+                               }
+
+                               containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
+
+                               //Check 'cluster-type' label for Virtlet VMs
+                               if clusterType == "virtlet-vm" {
+                                       //Get VM IP address of master
+                                       hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
+                                       if err != nil || hostIPaddress == "" {
+                                               err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
+                                               return reconcile.Result{}, err
+                                       }
+                                       containsMac = true
+                               }
+
+                               if containsMac {
+
+                                       if clusterType != "virtlet-vm" {
+                                               fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
+
+                                               //Get IP address of master
+                                               hostIPaddress, err = getHostIPaddress(bareMetalHostList, masterMAC)
+                                               if err != nil || hostIPaddress == "" {
+                                                       err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
+                                                       return reconcile.Result{}, err
+                                               }
+                                               allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                                       }
+
+                                       if clusterType == "virtlet-vm" {
+                                               allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                                       }
+                                       masterString += masterLabel + "\n"
+                                       clusterData[masterTag+masterLabel] = hostIPaddress
+
+                                       fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
+
+                                       if len(workersList) != 0 {
+
+                                               //Iterate through workersList and get all the mac addresses
+                                               for _, workerMap := range workersList {
+
+                                                       //Get worker labels from the workermap
+                                                       for workerLabel, worker := range workerMap {
+
+                                                               //Check if workerString already contains worker label
+                                                               containsWorkerLabel := strings.Contains(workerString, workerLabel)
+                                                               workerMAC := worker.MACaddress
+                                                               hostIPaddress = ""
+
+                                                               //Error occurs if the same label is given to different hosts (assumption:
+                                                               //each MAC address represents a unique host)
+                                                               if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
+                                                                       if containsWorkerLabel {
+                                                                               workerString = strings.ReplaceAll(workerString, workerLabel, "")
+                                                                       }
+                                                                       err = fmt.Errorf("a node with label %s already exists, modify the resource and assign a different label to the node with MAC address %s", workerLabel, workerMAC)
+                                                                       return reconcile.Result{}, err
+
+                                                                       //same node performs worker and master roles
+                                                               } else if workerLabel == masterLabel && !containsWorkerLabel {
+                                                                       workerString += workerLabel + "\n"
+
+                                                                       //Add host to ip address config map with worker tag
+                                                                       hostIPaddress = clusterData[masterTag+masterLabel]
+                                                                       clusterData[workerTag+masterLabel] = hostIPaddress
+
+                                                                       //Error occurs if the same node is given different labels
+                                                               } else if workerLabel != masterLabel && workerMAC == masterMAC {
+                                                                       if containsWorkerLabel {
+                                                                               workerString = strings.ReplaceAll(workerString, workerLabel, "")
+                                                                       }
+                                                                       err = fmt.Errorf("a node with label %s already exists, modify the resource and assign a different label to the node with MAC address %s", workerLabel, workerMAC)
+                                                                       return reconcile.Result{}, err
+
+                                                                       //worker node is different from any master node and it has not been added to the worker list
+                                                               } else if workerLabel != masterLabel && !containsWorkerLabel {
+
+                                                                       // Error occurs if no MAC address was provided for a worker node that is not also the master
+                                                                       if workerMAC == "" {
+                                                                               err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
+                                                                               return reconcile.Result{}, err
+                                                                       }
+
+                                                                       containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
+
+                                                                       if clusterType == "virtlet-vm" {
+                                                                               //Get VM IP address of worker
+                                                                               hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
+                                                                               if err != nil || hostIPaddress == "" {
+                                                                                       err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
+                                                                                       return reconcile.Result{}, err
+                                                                               }
+                                                                               containsMac = true
+                                                                       }
+
+                                                                       if containsMac {
+
+                                                                               if clusterType != "virtlet-vm" {
+                                                                                       fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, workerMAC)
+
+                                                                                       //Get IP address of worker
+                                                                                       hostIPaddress, err = getHostIPaddress(bareMetalHostList, workerMAC)
+                                                                                       if err != nil || hostIPaddress == "" {
+                                                                                               err = fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
+                                                                                               return reconcile.Result{}, err
+                                                                                       }
+                                                                                       allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                                                                               }
+                                                                               fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
+
+                                                                               if clusterType == "virtlet-vm" {
+                                                                                       allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                                                                               }
+                                                                               workerString += workerLabel + "\n"
+                                                                               clusterData[workerTag+workerLabel] = hostIPaddress
+
+                                                                               //No host found that matches the worker MAC
+                                                                       } else {
+
+                                                                               err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
+                                                                               return reconcile.Result{}, err
+                                                                       }
+                                                               }
+                                                       }
+                                               }
+                                               //No worker node specified, add master as worker node
+                                       } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
+                                               workerString += masterLabel + "\n"
+
+                                               //Add host to ip address config map with worker tag
+                                               hostIPaddress = clusterData[masterTag+masterLabel]
+                                               clusterData[workerTag+masterLabel] = hostIPaddress
+                                       }
+
+                                       //No host matching master MAC found
+                               } else {
+                                       err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
+                                       return reconcile.Result{}, err
+                               }
+                       }
+               }
+
+               //Create host.ini file
+               //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
+               iniHostFilePath := clusterDir + "/hosts.ini"
+               newFile, err := os.Create(iniHostFilePath)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating file \n %v", err)
+                       return reconcile.Result{}, err
+               }
+               defer newFile.Close()
+
+               hostFile, err := ini.Load(iniHostFilePath)
+               if err != nil {
+                       fmt.Printf("Error occurred while loading file \n %v", err)
+                       return reconcile.Result{}, err
+               }
+
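+               //Populate the inventory groups expected by the KUD installer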
+               _, err = hostFile.NewRawSection("all", allString)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating section \n %v", err)
+                       return reconcile.Result{}, err
                }
-               // Error reading the object - requeue the request.
+               _, err = hostFile.NewRawSection("kube-master", masterString)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating section \n %v", err)
+                       return reconcile.Result{}, err
+               }
+
+               _, err = hostFile.NewRawSection("kube-node", workerString)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating section \n %v", err)
+                       return reconcile.Result{}, err
+               }
+
+               _, err = hostFile.NewRawSection("etcd", masterString)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating section \n %v", err)
+                       return reconcile.Result{}, err
+               }
+
+               if clusterType != "virtlet-vm" {
+                       _, err = hostFile.NewRawSection("ovn-central", masterString)
+                       if err != nil {
+                               fmt.Printf("Error occurred while creating section \n %v", err)
+                               return reconcile.Result{}, err
+                       }
+
+                       _, err = hostFile.NewRawSection("ovn-controller", workerString)
+                       if err != nil {
+                               fmt.Printf("Error occurred while creating section \n %v", err)
+                               return reconcile.Result{}, err
+                       }
+
+                       _, err = hostFile.NewRawSection("virtlet", workerString)
+                       if err != nil {
+                               fmt.Printf("Error occurred while creating section \n %v", err)
+                               return reconcile.Result{}, err
+                       }
+               }
+               _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
+               if err != nil {
+                       fmt.Printf("Error occurred while creating section \n %v", err)
+                       return reconcile.Result{}, err
+               }
+
+               //Write out the hosts.ini file for KUD
+               if err = hostFile.SaveTo(iniHostFilePath); err != nil {
+                       return reconcile.Result{}, err
+               }
+
+               //Install KUD
+               err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
+               if err != nil {
+                       fmt.Printf("Error occurred while creating KUD installer job for cluster %v\n ERROR: %v", clusterName, err)
+                       return reconcile.Result{}, err
+               }
+
+               //Start a goroutine that keeps checking the job status and creates an IP address configmap
+               //for the cluster once KUD is successfully installed
+               go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
+
+               return reconcile.Result{}, nil
+
+       }
+
+       ///////////////////////////////////////////////////////////////////////////////////////////////
+       ////////////////         Software CR was created so install software         /////////////////
+       //////////////////////////////////////////////////////////////////////////////////////////////
+       softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
+       defaultSSHPrivateKey := "/root/.ssh/id_rsa"
+
+       //Get IP address configmap for the cluster
+       clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
+       if err != nil {
+               fmt.Printf("Error occurred while retrieving IP address data for cluster %s, ERROR: %v\n", softwareClusterName, err)
                return reconcile.Result{}, err
        }
 
-        mastersList := provisioningInstance.Spec.Masters
-        workersList := provisioningInstance.Spec.Workers
-        bareMetalHostList, _ := listBareMetalHosts()
+       for hostLabel, ipAddress := range clusterConfigMapData {
+
+               if strings.Contains(hostLabel, masterTag) {
+                       // It is a master node, install the master software
+                       err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
+                       if err != nil {
+                               fmt.Printf("Error occurred while installing master software on host %s, ERROR: %v\n", hostLabel, err)
+                       }
+               } else if strings.Contains(hostLabel, workerTag) {
+               } else if strings.Contains(hostLabel, workerTag) {
+                       // It is a worker node, install the worker software
+                       err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
+                       if err != nil {
+                               fmt.Printf("Error occurred while installing worker software on host %s, ERROR: %v\n", hostLabel, err)
+                       }
 
+               }
 
-        var allString string
-        var masterString string
-        var workerString string
+       }
 
-       //Iterate through mastersList and get all the mac addresses and IP addresses
+       return reconcile.Result{}, nil
+}
 
-        for _, masterMap := range mastersList {
+//Function to get a list of all BareMetalHost CRs
+func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
 
-                for masterLabel, master := range masterMap {
-                   containsMac, bmhCR := checkMACaddress(bareMetalHostList, master.MACaddress)
-                   if containsMac{
-                      //fmt.Println( master.MACaddress)
-                      fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, master.MACaddress)
+       //Create GVR representing a BareMetalHost CR
+       bmhGVR := schema.GroupVersionResource{
+               Group:    "metal3.io",
+               Version:  "v1alpha1",
+               Resource: "baremetalhosts",
+       }
 
-                      //Get IP address of master
-                      hostIPaddress, err := getHostIPaddress(master.MACaddress, dhcpLeaseFile )
-                     if err != nil {
-                        fmt.Printf("IP address not found for host with MAC address %s \n", master.MACaddress)
-                     }
+       //Get List containing all BareMetalHosts CRs
+       bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
+       if err != nil {
+               fmt.Printf("Error occurred, cannot get BareMetalHosts list, Error: %v\n", err)
+               return &unstructured.UnstructuredList{}, err
+       }
 
+       return bareMetalHosts, nil
+}
 
+//Function to check if a BareMetalHost with the given MAC address exists
+func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
 
-                      allString += masterLabel + "  ansible_host="  + hostIPaddress + " ip=" + hostIPaddress + "\n"
-                      masterString += masterLabel + "\n"
+       //Convert macAddress to byte array for comparison
+       macAddressByte := []byte(macAddress)
+       macBool := false
 
-                      fmt.Printf("%s : %s \n", hostIPaddress, master.MACaddress)
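+       //Serialize each BareMetalHost CR to JSON and check for the MAC address as a raw substring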
+       for _, bareMetalHost := range bareMetalHostList.Items {
+               bmhJson, _ := bareMetalHost.MarshalJSON()
 
+               macBool = bytes.Contains(bmhJson, macAddressByte)
+               if macBool {
+                       return macBool, bareMetalHost.GetName()
+               }
 
+       }
 
-                   } else {
+       return macBool, ""
 
-                      fmt.Printf("Host with MAC Address %s not found\n", master.MACaddress)
-                   }
-             }
-        }
+}
 
+//Function to get the IP address of a host from the BareMetalHost resource
+func getHostIPaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (string, error) {
 
-        //Iterate through workersList and get all the mac addresses
-        for _, workerMap := range workersList {
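+       //Walk status.hardware.nics of each BareMetalHost and return the IP of the NIC whose MAC address matches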
+       for _, bareMetalHost := range bareMetalHostList.Items {
+               status, ok := bareMetalHost.Object["status"].(map[string]interface{})
+               if !ok {
+                       continue
+               }
+               hardware, ok := status["hardware"].(map[string]interface{})
+               if !ok {
+                       continue
+               }
+               nics, ok := hardware["nics"].([]interface{})
+               if !ok {
+                       continue
+               }
+               for _, nic := range nics {
+                       n, ok := nic.(map[string]interface{})
+                       if !ok {
+                               continue
+                       }
+                       ip, ok := n["ip"].(string)
+                       if !ok {
+                               continue
+                       }
+                       if macAddress == n["mac"] {
+                               return ip, nil
+                       }
+               }
+       }
+       return "", nil
+}
 
-                for workerLabel, worker := range workerMap {
-                   containsMac, bmhCR := checkMACaddress(bareMetalHostList, worker.MACaddress)
-                   if containsMac{
-                      //fmt.Println( worker.MACaddress)
-                      fmt.Printf("Host %s matches that macAddress\n", bmhCR)
+//Function to create configmap
+func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {
 
-                      //Get IP address of worker
-                      hostIPaddress, err := getHostIPaddress(worker.MACaddress, dhcpLeaseFile )
-                     if err != nil {
-                        fmt.Printf("IP address not found for host with MAC address %s \n", worker.MACaddress)
-                     }
-                      fmt.Printf("%s : %s \n", hostIPaddress, worker.MACaddress)
+       configmapClient := clientset.CoreV1().ConfigMaps(namespace)
 
-                      allString += workerLabel + "  ansible_host="  + hostIPaddress + " ip=" + hostIPaddress + "\n"
-                      workerString += workerLabel + "\n"
+       configmap := &corev1.ConfigMap{
 
-                   }else {
+               ObjectMeta: metav1.ObjectMeta{
+                       Name:   labels["cluster"] + "-configmap",
+                       Labels: labels,
+               },
+               Data: data,
+       }
 
-                      fmt.Printf("Host with MAC Address %s not found\n", worker.MACaddress)
-                   }
+       _, err := configmapClient.Create(configmap)
+       if err != nil {
+               return err
 
-             }
-        }
+       }
+       return nil
 
+}
 
-        //Create host.ini file
-        iniHostFilePath := provisioningInstance.Spec.HostsFile
-        newFile, err := os.Create(iniHostFilePath)
-        defer newFile.Close()
+//Function to get configmap Data
+func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
 
+       configmapClient := clientset.CoreV1().ConfigMaps(namespace)
+       configmapName := clusterName + "-configmap"
+       clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
+       if err != nil {
+               return nil, err
+       }
 
-        if err != nil {
-           fmt.Printf("Error occured while creating file \n %v", err)
-        }
+       configmapData := clusterConfigmap.Data
+       return configmapData, nil
+}
 
-        hostFile, err := ini.Load(iniHostFilePath)
-        if err != nil {
-           fmt.Printf("Error occured while Loading file \n %v", err)
-        }
+//Function to create job for KUD installation
+func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error {
 
-        _, err = hostFile.NewRawSection("all", allString)
-        if err != nil {
-           fmt.Printf("Error occured while creating section \n %v", err)
-        }
-        _, err = hostFile.NewRawSection("kube-master", masterString)
-        if err != nil {
-           fmt.Printf("Error occured while creating section \n %v", err)
-        }
+       var backOffLimit int32 = 0
+       var privi bool = true
 
-        _, err = hostFile.NewRawSection("kube-node", workerString)
-        if err != nil {
-           fmt.Printf("Error occured while creating section \n %v", err)
-        }
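+       //Build the installer command line that the KUD job container will run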
+       installerString := " ./installer --cluster " + clusterName
+       if len(podSubnet) > 0 {
+               installerString += " --network " + podSubnet
+       }
 
-        _, err = hostFile.NewRawSection("etcd", masterString)
-        if err != nil {
-           fmt.Printf("Error occured while creating section \n %v", err)
-        }
+       // Check if any plugin was specified
+       if len(kudPlugins) > 0 {
+               plugins := " --plugins"
 
-        _, err = hostFile.NewRawSection("k8s-cluser:children", "kube-node\n" + "kube-master")
-        if err != nil {
-           fmt.Printf("Error occured while creating section \n %v", err)
-        }
+               for _, plug := range kudPlugins {
+                       plugins += " " + plug
+               }
 
+               installerString += plugins
+       }
+
+       jobClient := clientset.BatchV1().Jobs("default")
+
+       job := &batchv1.Job{
+
+               ObjectMeta: metav1.ObjectMeta{
+                       Name:   "kud-" + clusterName,
+                       Labels: labels,
+               },
+               Spec: batchv1.JobSpec{
+                       Template: corev1.PodTemplateSpec{
+                               ObjectMeta: metav1.ObjectMeta{
+                                       Labels: labels,
+                               },
+
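+                               // The installer pod runs on the host network and mounts the shared multi-cluster directory and the SSH key secret.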
+                               Spec: corev1.PodSpec{
+                                       HostNetwork: true,
+                                       Containers: []corev1.Container{{
+                                               Name:            "kud",
+                                               Image:           "github.com/onap/multicloud-k8s:latest",
+                                               ImagePullPolicy: "IfNotPresent",
+                                               VolumeMounts: []corev1.VolumeMount{{
+                                                       Name:      "multi-cluster",
+                                                       MountPath: "/opt/kud/multi-cluster",
+                                               },
+                                                       {
+                                                               Name:      "secret-volume",
+                                                               MountPath: "/.ssh",
+                                                       },
+                                               },
+                                               EnvFrom: []corev1.EnvFromSource{
+                                                       {
+                                                               ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}},
+                                                       },
+                                               },
+                                               Command: []string{"/bin/sh", "-c"},
+                                               Args:    []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
+                                               SecurityContext: &corev1.SecurityContext{
+                                                       Privileged: &privi,
+                                               },
+                                       },
+                                       },
+                                       Volumes: []corev1.Volume{{
+                                               Name: "multi-cluster",
+                                               VolumeSource: corev1.VolumeSource{
+                                                       HostPath: &corev1.HostPathVolumeSource{
+                                                               Path: "/opt/kud/multi-cluster",
+                                                       }}},
+                                               {
+                                                       Name: "secret-volume",
+                                                       VolumeSource: corev1.VolumeSource{
+                                                               Secret: &corev1.SecretVolumeSource{
+                                                                       SecretName: "ssh-key-secret",
+                                                               },
+                                                       }}},
+                                       RestartPolicy: "Never",
+                               },
+                       },
+                       BackoffLimit: &backOffLimit,
+               },
+       }
+       _, err := jobClient.Create(job)
+       if err != nil {
+               fmt.Printf("ERROR occurred while creating job to install KUD\n ERROR:%v", err)
+               return err
+       }
+       return nil
 
-        hostFile.SaveTo(iniHostFilePath)
-       return reconcile.Result{}, nil
 }
 
+//Function to check if the KUD installer job succeeded
+func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
+
+       fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
+       jobName := "kud-" + clusterName
+       jobClient := clientset.BatchV1().Jobs(namespace)
+
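+       //Poll the job status every two seconds until the job either succeeds or fails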
+       for {
+               time.Sleep(2 * time.Second)
+
+               job, err := jobClient.Get(jobName, metav1.GetOptions{})
+               if err != nil {
+                       fmt.Printf("ERROR: %v occurred while retrieving job: %s", err, jobName)
+                       return
+               }
+               jobSucceeded := job.Status.Succeeded
+               jobFailed := job.Status.Failed
+
+               if jobSucceeded == 1 {
+                       fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
+
+                       //KUD was installed successfully create configmap to store IP address info for the cluster
+                       err = createConfigMap(data, labels, namespace, clientset)
+                       if err != nil {
+                               fmt.Printf("Error occurred while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
+                               return
+                       }
+                       return
+               }
+
+               if jobFailed == 1 {
+                       fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
+                       return
+               }
+
+       }
+       return
 
-//Function to Get List containing baremetal hosts
-func listBareMetalHosts() (*unstructured.UnstructuredList, error) {
-
-     //Get Current User and kube config file
-     usr, err := user.Current()
-     if err != nil {
-        fmt.Println("Could not get current user\n")
-        return &unstructured.UnstructuredList{}, err
-     }
-
-     kubeConfig := filepath.Join(usr.HomeDir, ".kube", "config")
-
-     //Build Config Flags
-     config, err :=  clientcmd.BuildConfigFromFlags("", kubeConfig)
-     if err != nil {
-        fmt.Println("Could not build config\n")
-        return &unstructured.UnstructuredList{}, err
-     }
-
-    //Create Dynamic Client  for BareMetalHost CRD
-    bmhDynamicClient, err := dynamic.NewForConfig(config)
-
-    if err != nil {
-       fmt.Println("Could not create dynamic client for bareMetalHosts\n")
-       return &unstructured.UnstructuredList{}, err
-    }
-
-    //Create GVR representing a BareMetalHost CR
-    bmhGVR := schema.GroupVersionResource{
-      Group:    "metal3.io",
-      Version:  "v1alpha1",
-      Resource: "baremetalhosts",
-    }
-
-    //Get List containing all BareMetalHosts CRs
-    bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
-    if err != nil {
-       fmt.Println("Error occured, cannot get BareMetalHosts list\n")
-       return &unstructured.UnstructuredList{}, err
-    }
-
-    return bareMetalHosts, nil
 }
 
-//Function to check if BareMetalHost containing MAC address exist
-func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
+//Function to get software list from software CR
+func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
+
+       CRclusterName := softwareCR.GetLabels()["cluster"]
+
+       masterSoftwareList := softwareCR.Spec.MasterSoftware
+       workerSoftwareList := softwareCR.Spec.WorkerSoftware
+
+       return CRclusterName, masterSoftwareList, workerSoftwareList
+}
+
+//Function to install software on cluster hosts
+func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
+
+       var installString string
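+       //Build a space-separated package list; entries are either package names or maps of package name to {version: <version>}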
+       for _, software := range softwareList {
+
+               switch t := software.(type) {
+               case string:
+                       installString += software.(string) + " "
+               case interface{}:
+                       softwareMap, ok := software.(map[string]interface{})
+                       if !ok {
+                               fmt.Printf("Error occurred, cannot install software %v\n", software)
+                       }
+                       for softwareName, versionMap := range softwareMap {
+
+                               versionMAP, _ := versionMap.(map[string]interface{})
+                               version := versionMAP["version"].(string)
+                               installString += softwareName + "=" + version + " "
+                       }
+               default:
+                       fmt.Printf("invalid format %v\n", t)
+               }
+
+       }
+
+       err := sshInstaller(installString, sshPrivateKey, ipAddress)
+       if err != nil {
+               return err
+       }
+       return nil
+
+}
 
-     //Convert macAddress to byte array for comparison
-     macAddressByte :=  []byte(macAddress)
-     macBool := false
+//Function to run installation commands over SSH
+func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
 
-     for _, bareMetalHost := range bareMetalHostList.Items {
-         bmhJson, _ := bareMetalHost.MarshalJSON()
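+       //Load the SSH private key and connect to the host as root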
+       buffer, err := ioutil.ReadFile(sshPrivateKey)
+       if err != nil {
+               return err
+       }
+
+       key, err := ssh.ParsePrivateKey(buffer)
+       if err != nil {
+               return err
+       }
+
+       sshConfig := &ssh.ClientConfig{
+               User: "root",
+               Auth: []ssh.AuthMethod{
+                       ssh.PublicKeys(key),
+               },
+
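+               //Host key verification is skipped for the newly provisioned hosts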
+               HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+       }
+
+       client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
+       if err != nil {
+               return err
+       }
+
+       session, err := client.NewSession()
+       if err != nil {
+               return err
+       }
+
+       defer session.Close()
+       defer client.Close()
+
+       cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
+       //Run waits for the remote install command to complete before the session is closed
+       err = session.Run(cmd)
+
+       if err != nil {
+               return err
+       }
 
-         macBool = bytes.Contains(bmhJson, macAddressByte)
-         if macBool{
-             return macBool, bareMetalHost.GetName()
-         }
+       return nil
+
+}
 
-      }
+func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
 
-         return macBool, ""
+       var vmPodList []VirtletVM
 
+       pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+       if err != nil {
+               fmt.Printf("Could not get pod info, Error: %v\n", err)
+               return []VirtletVM{}, err
+       }
+
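+       //A pod is treated as a Virtlet VM when its kubernetes.io/target-runtime annotation is "virtlet.cloud"
+       //and it is Running; the IP comes from the pod status and the MAC from the networks-status annotation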
+       for _, pod := range pods.Items {
+               var podAnnotation map[string]interface{}
+               var podStatus corev1.PodStatus
+               var podDefaultNetStatus []NetworksStatus
+
+               annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
+               if err != nil {
+                       fmt.Printf("Could not get pod annotations, Error: %v\n", err)
+                       return []VirtletVM{}, err
+               }
+
+               json.Unmarshal([]byte(annotation), &podAnnotation)
+               if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
+                       runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
+
+                       podStatusJson, _ := json.Marshal(pod.Status)
+                       json.Unmarshal([]byte(podStatusJson), &podStatus)
+
+                       if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
+                               ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
+                               json.Unmarshal([]byte(ns), &podDefaultNetStatus)
+
+                               if len(podDefaultNetStatus) > 0 {
+                                       vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
+                               }
+                       }
+               }
+       }
+
+       return vmPodList, nil
 }
 
-func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, error) {
-
-     //Read the dhcp lease file
-     dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
-     if err != nil {
-        fmt.Println("Failed to read lease file\n")
-        return "", err
-     }
-
-     dhcpLeases := string(dhcpFile)
-
-     //Regex to use to search dhcpLeases
-     regex := "lease.*{|ethernet.*"
-     re, err := regexp.Compile(regex)
-     if err != nil {
-        fmt.Println("Could not create Regexp object\n")
-        return "", err
-     }
-
-     //Get String containing leased Ip addresses and Corressponding MAC addresses
-     out := re.FindAllString(dhcpLeases, -1)
-     outString := strings.Join(out, " ")
-     stringReplacer := strings.NewReplacer("lease", "", "{ ethernet ", "", ";", "")
-     replaced := stringReplacer.Replace(outString)
-     ipMacList := strings.Fields(replaced)
-
-
-     //Get IP addresses corresponding to Input MAC Address
-     for idx := len(ipMacList)-1 ; idx >= 0; idx -- {
-         item := ipMacList[idx]
-         if item == macAddress  {
-            ipAdd := ipMacList[idx -1]
-            return ipAdd, nil
-    }
-
- }
-     return "", nil
+func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
+
+       for i := 0; i < len(vmList); i++ {
+               if vmList[i].MACaddress == macAddress {
+                       return vmList[i].IPaddress, nil
+               }
+       }
+       return "", nil
 }