3e00456979d5c524705e8135f745cacbde049250
[icn.git] / cmd / bpa-operator / pkg / controller / provisioning / provisioning_controller.go
package provisioning

import (
        "bytes"
        "context"
        "encoding/json"
        "fmt"
        "io/ioutil"
        "os"
        "regexp"
        "strings"
        "time"

        bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
        "golang.org/x/crypto/ssh"
        "gopkg.in/ini.v1"
        batchv1 "k8s.io/api/batch/v1"
        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/client-go/dynamic"
        "k8s.io/client-go/kubernetes"
        "sigs.k8s.io/controller-runtime/pkg/client"
        "sigs.k8s.io/controller-runtime/pkg/client/config"
        "sigs.k8s.io/controller-runtime/pkg/controller"
        "sigs.k8s.io/controller-runtime/pkg/handler"
        "sigs.k8s.io/controller-runtime/pkg/manager"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
        "sigs.k8s.io/controller-runtime/pkg/source"
)

// VirtletVM pairs a Virtlet VM pod's IP address with the MAC address of its default network interface.
type VirtletVM struct {
        IPaddress  string
        MACaddress string
}

// NetworksStatus models an entry of the Multus default-network pod annotation,
// from which a VM's MAC address is read.
type NetworksStatus struct {
        Name      string      `json:"name,omitempty"`
        Interface string      `json:"interface,omitempty"`
        Ips       []string    `json:"ips,omitempty"`
        Mac       string      `json:"mac,omitempty"`
        Default   bool        `json:"default,omitempty"`
        Dns       interface{} `json:"dns,omitempty"`
}

var log = logf.Log.WithName("controller_provisioning")

/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.
*/

// Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
        return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {

        // cfg avoids shadowing the imported config package
        cfg, err := config.GetConfig()
        if err != nil {
                fmt.Printf("Could not get kube config, Error: %v\n", err)
        }

        clientSet, err := kubernetes.NewForConfig(cfg)
        if err != nil {
                fmt.Printf("Could not create clientset, Error: %v\n", err)
        }

        bmhDynamicClient, err := dynamic.NewForConfig(cfg)
        if err != nil {
                fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
        }

        return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
        // Create a new controller
        c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
        if err != nil {
                return err
        }

        // Watch for changes to the primary resource Provisioning
        err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
        if err != nil {
                return err
        }

        // Watch for changes to the configmap created as a consequence of the provisioning CR
        err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
                IsController: true,
                OwnerType:    &bpav1alpha1.Provisioning{},
        })
        if err != nil {
                return err
        }

        // Watch for changes to the job resource, also created as a consequence of the provisioning CR
        err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
                IsController: true,
                OwnerType:    &bpav1alpha1.Provisioning{},
        })
        if err != nil {
                return err
        }

        // Watch for changes to the Software CR
        err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
        if err != nil {
                return err
        }

        return nil
}

// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileProvisioning{}

// ReconcileProvisioning reconciles a Provisioning object
type ReconcileProvisioning struct {
        // This client, initialized using mgr.Client() above, is a split client
        // that reads objects from the cache and writes to the apiserver
        client    client.Client
        scheme    *runtime.Scheme
        clientset kubernetes.Interface
        bmhClient dynamic.Interface
}

// Reconcile reads the state of the cluster for a Provisioning or Software object and makes
// changes based on the state read and on what is in the resource's Spec.
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true; otherwise, upon completion it will remove the work from the queue.
func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
        reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
        fmt.Printf("\n\n")
        reqLogger.Info("Reconciling Custom Resource")

        // Fetch the Provisioning instance
        provisioningInstance := &bpav1alpha1.Provisioning{}
        softwareInstance := &bpav1alpha1.Software{}
        err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
        provisioningCreated := true
        if err != nil {

                // Check whether it is a Software instance instead
                err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
                if err != nil {
                        if errors.IsNotFound(err) {
                                // Request object not found, could have been deleted after reconcile request.
                                // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
                                // Return and don't requeue
                                return reconcile.Result{}, nil
                        }

                        // Error reading the object - requeue the request.
                        return reconcile.Result{}, err
                }

                // No error occurred, so a Software CR was created, not a Provisioning CR
                provisioningCreated = false
        }

        masterTag := "MASTER_"
        workerTag := "WORKER_"

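        // Illustrative shape of the Provisioning CR consumed below (values are made up and
        // the YAML keys are approximate renderings of the bpav1alpha1 Go types):
        //
        //   metadata:
        //     labels:
        //       cluster: cluster-a
        //       cluster-type: virtlet-vm   # only set for Virtlet-backed clusters
        //   spec:
        //     masters:
        //     - master-1:
        //         macAddress: 08:00:27:4a:1f:2e
        //     workers:
        //     - worker-1:
        //         macAddress: 08:00:27:4a:1f:2f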
        if provisioningCreated {

                ///////////////////////////////////////////////////////////////////////////////////////////////
                ////////////////         Provisioning CR was created so install KUD          /////////////////
                //////////////////////////////////////////////////////////////////////////////////////////////
                clusterName := provisioningInstance.Labels["cluster"]
                clusterType := provisioningInstance.Labels["cluster-type"]
                mastersList := provisioningInstance.Spec.Masters
                workersList := provisioningInstance.Spec.Workers
                kudPlugins := provisioningInstance.Spec.KUDPlugins

                bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
                virtletVMList, _ := listVirtletVMs(r.clientset)

                var allString string
                var masterString string
                var workerString string

                dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
                multiClusterDir := "/multi-cluster"

                // Create the directory for the specific cluster
                clusterDir := multiClusterDir + "/" + clusterName
                os.MkdirAll(clusterDir, os.ModePerm)

                // Create maps to be used for the cluster's IP-address-to-label configmap
                clusterLabel := make(map[string]string)
                clusterLabel["cluster"] = clusterName
                clusterData := make(map[string]string)

                // Iterate through mastersList and get all the MAC and IP addresses
                for _, masterMap := range mastersList {

                        for masterLabel, master := range masterMap {
                                masterMAC := master.MACaddress
                                hostIPaddress := ""

                                if masterMAC == "" {
                                        err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
                                        return reconcile.Result{}, err
                                }

                                containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)

                                // Check the 'cluster-type' label for Virtlet VMs
                                if clusterType == "virtlet-vm" {
                                        // Get the VM IP address of the master
                                        hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
                                        if err != nil || hostIPaddress == "" {
                                                err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
                                                return reconcile.Result{}, err
                                        }
                                        containsMac = true
                                }

                                if containsMac {

                                        if clusterType != "virtlet-vm" {
                                                fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)

                                                // Get the IP address of the master
                                                hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile)
                                                if err != nil || hostIPaddress == "" {
                                                        err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
                                                        return reconcile.Result{}, err
                                                }
                                        }

                                        allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
                                        masterString += masterLabel + "\n"
                                        clusterData[masterTag+masterLabel] = hostIPaddress

                                        fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)

                                        if len(workersList) != 0 {

                                                // Iterate through workersList and get all the MAC addresses
                                                for _, workerMap := range workersList {

                                                        // Get worker labels from the workerMap
                                                        for workerLabel, worker := range workerMap {

                                                                // Check if workerString already contains the worker label
                                                                containsWorkerLabel := strings.Contains(workerString, workerLabel)
                                                                workerMAC := worker.MACaddress
                                                                hostIPaddress = ""

                                                                // Error occurs if the same label is given to different hosts (assumption:
                                                                // each MAC address represents a unique host)
                                                                if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
                                                                        if containsWorkerLabel {
                                                                                workerString = strings.ReplaceAll(workerString, workerLabel, "")
                                                                        }
                                                                        err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
                                                                        different label to node with MACAddress %s`, workerLabel, workerMAC)
                                                                        return reconcile.Result{}, err

                                                                // The same node performs both the worker and master roles
                                                                } else if workerLabel == masterLabel && !containsWorkerLabel {
                                                                        workerString += workerLabel + "\n"

                                                                        // Add the host to the IP address configmap with the worker tag
                                                                        hostIPaddress = clusterData[masterTag+masterLabel]
                                                                        clusterData[workerTag+masterLabel] = hostIPaddress

                                                                // Error occurs if the same node is given different labels
                                                                } else if workerLabel != masterLabel && workerMAC == masterMAC {
                                                                        if containsWorkerLabel {
                                                                                workerString = strings.ReplaceAll(workerString, workerLabel, "")
                                                                        }
                                                                        err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
                                                                        different label to node with MACAddress %s`, workerLabel, workerMAC)
                                                                        return reconcile.Result{}, err

                                                                // The worker node differs from every master node and has not yet been added to the worker list
                                                                } else if workerLabel != masterLabel && !containsWorkerLabel {

                                                                        // Error occurs if no MAC address was provided for a worker that does not match a master
                                                                        if workerMAC == "" {
                                                                                err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
                                                                                return reconcile.Result{}, err
                                                                        }

                                                                        containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)

                                                                        if clusterType == "virtlet-vm" {
                                                                                // Get the VM IP address of the worker
                                                                                hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
                                                                                if err != nil || hostIPaddress == "" {
                                                                                        err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
                                                                                        return reconcile.Result{}, err
                                                                                }
                                                                                containsMac = true
                                                                        }

                                                                        if containsMac {

                                                                                if clusterType != "virtlet-vm" {
                                                                                        fmt.Printf("Host %s matches that macAddress\n", bmhCR)

                                                                                        // Get the IP address of the worker
                                                                                        hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile)
                                                                                        if err != nil {
                                                                                                err = fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
                                                                                                return reconcile.Result{}, err
                                                                                        }
                                                                                }
                                                                                fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)

                                                                                allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
                                                                                workerString += workerLabel + "\n"
                                                                                clusterData[workerTag+workerLabel] = hostIPaddress

                                                                        // No host found that matches the worker MAC
                                                                        } else {
                                                                                err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
                                                                                return reconcile.Result{}, err
                                                                        }
                                                                }
                                                        }
                                                }

                                        // No worker node specified, add the master as a worker node
                                        } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
                                                workerString += masterLabel + "\n"

                                                // Add the host to the IP address configmap with the worker tag
                                                hostIPaddress = clusterData[masterTag+masterLabel]
                                                clusterData[workerTag+masterLabel] = hostIPaddress
                                        }

                                // No host matching the master MAC was found
                                } else {
                                        err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
                                        return reconcile.Result{}, err
                                }
                        }
                }

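                // For a one-master/one-worker cluster the generated hosts.ini has this shape
                // (illustrative values):
                //
                //   [all]
                //   master-1  ansible_ssh_host=10.10.10.10 ansible_ssh_port=22
                //   worker-1  ansible_ssh_host=10.10.10.11 ansible_ssh_port=22
                //   [kube-master]
                //   master-1
                //   [kube-node]
                //   worker-1
                //   [etcd]
                //   master-1
                //   [k8s-cluster:children]
                //   kube-node
                //   kube-master
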
                // Create the hosts.ini file
                iniHostFilePath := clusterDir + "/hosts.ini"
                newFile, err := os.Create(iniHostFilePath)
                if err != nil {
                        fmt.Printf("Error occurred while creating file \n %v", err)
                        return reconcile.Result{}, err
                }
                defer newFile.Close()

                hostFile, err := ini.Load(iniHostFilePath)
                if err != nil {
                        fmt.Printf("Error occurred while loading file \n %v", err)
                        return reconcile.Result{}, err
                }

                _, err = hostFile.NewRawSection("all", allString)
                if err != nil {
                        fmt.Printf("Error occurred while creating section \n %v", err)
                        return reconcile.Result{}, err
                }

                _, err = hostFile.NewRawSection("kube-master", masterString)
                if err != nil {
                        fmt.Printf("Error occurred while creating section \n %v", err)
                        return reconcile.Result{}, err
                }

                _, err = hostFile.NewRawSection("kube-node", workerString)
                if err != nil {
                        fmt.Printf("Error occurred while creating section \n %v", err)
                        return reconcile.Result{}, err
                }

                _, err = hostFile.NewRawSection("etcd", masterString)
                if err != nil {
                        fmt.Printf("Error occurred while creating section \n %v", err)
                        return reconcile.Result{}, err
                }

                if clusterType == "virtlet-vm" {
                        _, err = hostFile.NewRawSection("ovn-central", masterString)
                        if err != nil {
                                fmt.Printf("Error occurred while creating section \n %v", err)
                                return reconcile.Result{}, err
                        }
                        _, err = hostFile.NewRawSection("ovn-controller", masterString)
                        if err != nil {
                                fmt.Printf("Error occurred while creating section \n %v", err)
                                return reconcile.Result{}, err
                        }
                }

                _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
                if err != nil {
                        fmt.Printf("Error occurred while creating section \n %v", err)
                        return reconcile.Result{}, err
                }

                // Save the hosts.ini file for KUD
                hostFile.SaveTo(iniHostFilePath)

                // Install KUD
                err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset)
                if err != nil {
                        fmt.Printf("Error occurred while creating KUD installer job for cluster %v\n ERROR: %v", clusterName, err)
                        return reconcile.Result{}, err
                }

                // Start a separate goroutine to keep checking the job status; create an IP address
                // configmap for the cluster if KUD is successfully installed
                go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)

                return reconcile.Result{}, nil
        }

        ///////////////////////////////////////////////////////////////////////////////////////////////
        ////////////////         Software CR was created so install software         /////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////
        softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
        defaultSSHPrivateKey := "/root/.ssh/id_rsa"

        // Get the IP address configmap for the cluster
        clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
        if err != nil {
                fmt.Printf("Error occurred while retrieving IP address data for cluster %s, ERROR: %v\n", softwareClusterName, err)
                return reconcile.Result{}, err
        }

        for hostLabel, ipAddress := range clusterConfigMapData {

                if strings.Contains(hostLabel, masterTag) {
                        // It's a master node, install the master software
                        err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
                        if err != nil {
                                fmt.Printf("Error occurred while installing master software in host %s, ERROR: %v\n", hostLabel, err)
                        }
                } else if strings.Contains(hostLabel, workerTag) {
                        // It's a worker node, install the worker software
                        err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
                        if err != nil {
                                fmt.Printf("Error occurred while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
                        }
                }
        }

        return reconcile.Result{}, nil
}

// listBareMetalHosts returns the list of all BareMetalHost CRs in the cluster
func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {

        // Create a GVR representing the BareMetalHost CR
        bmhGVR := schema.GroupVersionResource{
                Group:    "metal3.io",
                Version:  "v1alpha1",
                Resource: "baremetalhosts",
        }

        // Get a list containing all BareMetalHost CRs
        bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
        if err != nil {
                fmt.Printf("Error occurred, cannot get BareMetalHosts list, Error: %v\n", err)
                return &unstructured.UnstructuredList{}, err
        }

        return bareMetalHosts, nil
}

// checkMACaddress reports whether any BareMetalHost CR contains the given MAC address and,
// if so, returns the name of the first matching CR.
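// Note: the match is a plain substring search over each CR's JSON, so the MAC address may
// match anywhere in the object (spec, status, or annotations).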
func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {

        // Convert macAddress to a byte array for comparison
        macAddressByte := []byte(macAddress)
        macBool := false

        for _, bareMetalHost := range bareMetalHostList.Items {
                bmhJson, _ := bareMetalHost.MarshalJSON()

                macBool = bytes.Contains(bmhJson, macAddressByte)
                if macBool {
                        return macBool, bareMetalHost.GetName()
                }
        }

        return macBool, ""
}

// getHostIPaddress returns the IP address leased to the host with the given MAC address,
// looked up in the DHCP lease file
func getHostIPaddress(macAddress string, dhcpLeaseFilePath string) (string, error) {

        // Read the DHCP lease file
        dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
        if err != nil {
                fmt.Printf("Failed to read lease file\n")
                return "", err
        }

        dhcpLeases := string(dhcpFile)

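        // A dhcpd.leases entry has (roughly) this shape -- illustrative example:
        //
        //   lease 10.10.10.10 {
        //     binding state active;
        //     hardware ethernet 08:00:27:4a:1f:2e;
        //   }
        //
        // The regex below extracts the "lease", "binding state", and "ethernet" lines, and the
        // replacer strips the keywords, so ipMacList ends up holding repeating
        // [IP, state, MAC] triples.
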
        // Regex used to search dhcpLeases
        reg := "lease.*{|ethernet.*|\n. binding state.*"
        re, err := regexp.Compile(reg)
        if err != nil {
                fmt.Printf("Could not create Regexp object, Error %v occurred\n", err)
                return "", err
        }

        // Get a string containing the leased IP addresses and corresponding MAC addresses
        out := re.FindAllString(dhcpLeases, -1)
        outString := strings.Join(out, " ")
        stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
                " binding state", "", "{", "")
        replaced := stringReplacer.Replace(outString)
        ipMacList := strings.Fields(replaced)

        // Get the IP address corresponding to the input MAC address
        for idx := len(ipMacList) - 1; idx >= 0; idx-- {
                item := ipMacList[idx]
                if item == macAddress {

                        leaseState := ipMacList[idx-1]
                        if leaseState != "active" {
                                err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
                                fmt.Printf("%v\n", err)
                                return "", err
                        }
                        ipAdd := ipMacList[idx-2]
                        return ipAdd, nil
                }
        }

        return "", nil
}

// createConfigMap creates a configmap named <cluster>-configmap holding the cluster's host IP data
func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {

        configmapClient := clientset.CoreV1().ConfigMaps(namespace)

        configmap := &corev1.ConfigMap{
                ObjectMeta: metav1.ObjectMeta{
                        Name:   labels["cluster"] + "-configmap",
                        Labels: labels,
                },
                Data: data,
        }

        _, err := configmapClient.Create(configmap)
        if err != nil {
                return err
        }
        return nil
}

// getConfigMapData returns the Data map of the cluster's configmap
func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {

        configmapClient := clientset.CoreV1().ConfigMaps(namespace)
        configmapName := clusterName + "-configmap"
        clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
        if err != nil {
                return nil, err
        }

        configmapData := clusterConfigmap.Data
        return configmapData, nil
}

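// The installer job below runs KUD against the hosts.ini that Reconcile wrote under
// /multi-cluster/<cluster>. This relies on the operator's /multi-cluster directory and the
// job's /opt/kud/multi-cluster hostPath volume referring to the same host directory
// (a deployment assumption, not enforced here).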
// createKUDinstallerJob creates the job that installs KUD on the cluster's hosts
func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error {

        var backOffLimit int32 = 0
        var privi bool = true

        installerString := " ./installer --cluster " + clusterName

        // Check if any plugin was specified
        if len(kudPlugins) > 0 {
                plugins := " --plugins"

                for _, plug := range kudPlugins {
                        plugins += " " + plug
                }

                installerString += plugins
        }
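
        // e.g. clusterName "cluster-a" with kudPlugins ["onap4k8s"] (illustrative values) yields:
        //   ./installer --cluster cluster-a --plugins onap4k8s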

        jobClient := clientset.BatchV1().Jobs("default") // Note: the job is always created in the "default" namespace

        job := &batchv1.Job{
                ObjectMeta: metav1.ObjectMeta{
                        Name:   "kud-" + clusterName,
                        Labels: labels,
                },
                Spec: batchv1.JobSpec{
                        Template: corev1.PodTemplateSpec{
                                ObjectMeta: metav1.ObjectMeta{
                                        Labels: labels,
                                },
                                Spec: corev1.PodSpec{
                                        HostNetwork: true,
                                        Containers: []corev1.Container{{
                                                Name:            "kud",
                                                Image:           "github.com/onap/multicloud-k8s:latest",
                                                ImagePullPolicy: "IfNotPresent",
                                                VolumeMounts: []corev1.VolumeMount{
                                                        {
                                                                Name:      "multi-cluster",
                                                                MountPath: "/opt/kud/multi-cluster",
                                                        },
                                                        {
                                                                Name:      "secret-volume",
                                                                MountPath: "/.ssh",
                                                        },
                                                },
                                                Command: []string{"/bin/sh", "-c"},
                                                Args:    []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
                                                SecurityContext: &corev1.SecurityContext{
                                                        Privileged: &privi,
                                                },
                                        }},
                                        Volumes: []corev1.Volume{
                                                {
                                                        Name: "multi-cluster",
                                                        VolumeSource: corev1.VolumeSource{
                                                                HostPath: &corev1.HostPathVolumeSource{
                                                                        Path: "/opt/kud/multi-cluster",
                                                                },
                                                        },
                                                },
                                                {
                                                        Name: "secret-volume",
                                                        VolumeSource: corev1.VolumeSource{
                                                                Secret: &corev1.SecretVolumeSource{
                                                                        SecretName: "ssh-key-secret",
                                                                },
                                                        },
                                                },
                                        },
                                        RestartPolicy: "Never",
                                },
                        },
                        BackoffLimit: &backOffLimit,
                },
        }

        _, err := jobClient.Create(job)
        if err != nil {
                fmt.Printf("ERROR occurred while creating job to install KUD\n ERROR:%v", err)
                return err
        }
        return nil
}

// checkJob polls the KUD installer job every 2 seconds until it succeeds or fails
func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {

        fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
        jobName := "kud-" + clusterName
        jobClient := clientset.BatchV1().Jobs(namespace)

        for {
                time.Sleep(2 * time.Second)

                job, err := jobClient.Get(jobName, metav1.GetOptions{})
                if err != nil {
                        fmt.Printf("ERROR: %v occurred while retrieving job: %s", err, jobName)
                        return
                }
                jobSucceeded := job.Status.Succeeded
                jobFailed := job.Status.Failed

                if jobSucceeded == 1 {
                        fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)

                        // KUD was installed successfully, create a configmap to store the IP address info for the cluster
                        err = createConfigMap(data, labels, namespace, clientset)
                        if err != nil {
                                fmt.Printf("Error occurred while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
                                return
                        }
                        return
                }

                if jobFailed == 1 {
                        fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
                        return
                }
        }
}

// getSoftwareList returns the cluster name and the master and worker software lists from a Software CR
func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {

        CRclusterName := softwareCR.GetLabels()["cluster"]

        masterSoftwareList := softwareCR.Spec.MasterSoftware
        workerSoftwareList := softwareCR.Spec.WorkerSoftware

        return CRclusterName, masterSoftwareList, workerSoftwareList
}

// softwareInstaller installs the given software list on a cluster host over SSH.
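// Each element of a software list is either a plain package name or a map pinning a
// version (illustrative values):
//
//   - curl
//   - docker.io:
//       version: 18.09.7
//
// which yields the install string "curl docker.io=18.09.7 ".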
func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {

        var installString string
        for _, software := range softwareList {

                switch t := software.(type) {
                case string:
                        installString += software.(string) + " "
                case interface{}: // catches every non-nil, non-string entry, i.e. the version maps
                        softwareMap, errBool := software.(map[string]interface{})
                        if !errBool {
                                fmt.Printf("Error occurred, cannot install software %v\n", software)
                        }
                        for softwareName, versionMap := range softwareMap {

                                versionMAP, _ := versionMap.(map[string]interface{})
                                version := versionMAP["version"].(string)
                                installString += softwareName + "=" + version + " "
                        }
                default:
                        fmt.Printf("invalid format %v\n", t)
                }
        }

        err := sshInstaller(installString, sshPrivateKey, ipAddress)
        if err != nil {
                return err
        }
        return nil
}

// sshInstaller runs the installation command on the target host over SSH.
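// The command executed on the host is "sudo apt-get update && apt-get install <packages> -y",
// authenticating as root with the supplied private key; host key checking is disabled.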
func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {

        buffer, err := ioutil.ReadFile(sshPrivateKey)
        if err != nil {
                return err
        }

        key, err := ssh.ParsePrivateKey(buffer)
        if err != nil {
                return err
        }

        sshConfig := &ssh.ClientConfig{
                User: "root",
                Auth: []ssh.AuthMethod{
                        ssh.PublicKeys(key),
                },

                HostKeyCallback: ssh.InsecureIgnoreHostKey(),
        }

        client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
        if err != nil {
                return err
        }

        session, err := client.NewSession()
        if err != nil {
                return err
        }

        defer session.Close()
        defer client.Close()

        cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
        // Use Run (not Start) so the deferred Close calls do not tear the session
        // down before the command completes
        err = session.Run(cmd)
        if err != nil {
                return err
        }

        return nil
}

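// listVirtletVMs returns the IP and MAC addresses of all running Virtlet VM pods. Virtlet
// pods are identified by the "kubernetes.io/target-runtime: virtlet.cloud" annotation, and
// the MAC address is read from the pod's Multus default-network annotation.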
func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {

        var vmPodList []VirtletVM

        pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
        if err != nil {
                fmt.Printf("Could not get pod info, Error: %v\n", err)
                return []VirtletVM{}, err
        }

        for _, pod := range pods.Items {
                var podAnnotation map[string]interface{}
                var podStatus corev1.PodStatus
                var podDefaultNetStatus []NetworksStatus

                annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
                if err != nil {
                        fmt.Printf("Could not get pod annotations, Error: %v\n", err)
                        return []VirtletVM{}, err
                }

                json.Unmarshal(annotation, &podAnnotation)
                if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
                        runtime := podAnnotation["kubernetes.io/target-runtime"].(string)

                        podStatusJson, _ := json.Marshal(pod.Status)
                        json.Unmarshal(podStatusJson, &podStatus)

                        if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
                                ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
                                json.Unmarshal([]byte(ns), &podDefaultNetStatus)

                                // Guard against an empty network status list before indexing
                                if len(podDefaultNetStatus) > 0 {
                                        vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
                                }
                        }
                }
        }

        return vmPodList, nil
}

// getVMIPaddress returns the IP address of the Virtlet VM with the given MAC address
func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {

        for i := 0; i < len(vmList); i++ {
                if vmList[i].MACaddress == macAddress {
                        return vmList[i].IPaddress, nil
                }
        }
        return "", nil
}