cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
package provisioning

import (
    "bytes"
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "regexp"
    "strings"
    "time"

    bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"

    "golang.org/x/crypto/ssh"
    "gopkg.in/ini.v1"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
    "sigs.k8s.io/controller-runtime/pkg/controller"
    "sigs.k8s.io/controller-runtime/pkg/handler"
    "sigs.k8s.io/controller-runtime/pkg/manager"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"
    logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
    "sigs.k8s.io/controller-runtime/pkg/source"
)

type VirtletVM struct {
    IPaddress  string
    MACaddress string
}

type NetworksStatus struct {
    Name      string      `json:"name,omitempty"`
    Interface string      `json:"interface,omitempty"`
    Ips       []string    `json:"ips,omitempty"`
    Mac       string      `json:"mac,omitempty"`
    Default   bool        `json:"default,omitempty"`
    Dns       interface{} `json:"dns,omitempty"`
}
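
// NetworksStatus describes one entry of the JSON-encoded network status list that
// listVirtletVMs reads from the "v1.multus-cni.io/default-network" pod annotation.
// Purely as an illustration of the fields above (the values are made up, and the exact
// annotation contents depend on how the cluster's CNI is configured), one entry could
// look like:
//
//    [{
//      "name": "ovn-networkobj",
//      "interface": "eth0",
//      "ips": ["10.10.10.5"],
//      "mac": "0a:00:00:00:00:01",
//      "default": true
//    }]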

var log = logf.Log.WithName("controller_provisioning")

/**
* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
* business logic. Delete these comments after modifying this file.
 */

// Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager) error {
    return add(mgr, newReconciler(mgr))
}

// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
    return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}

// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r reconcile.Reconciler) error {
    // Create a new controller
    c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
    if err != nil {
        return err
    }

    // Watch for changes to the primary resource Provisioning
    err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
    if err != nil {
        return err
    }

    // Watch for changes to the ConfigMap resources created as a consequence of the provisioning CR
    err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
        IsController: true,
        OwnerType:    &bpav1alpha1.Provisioning{},
    })
    if err != nil {
        return err
    }

    // Watch for changes to the Job resources also created as a consequence of the provisioning CR
    err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
        IsController: true,
        OwnerType:    &bpav1alpha1.Provisioning{},
    })
    if err != nil {
        return err
    }

    // Watch for changes to the Software CR
    err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
    if err != nil {
        return err
    }

    return nil
}

// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileProvisioning{}

// ReconcileProvisioning reconciles a Provisioning object
type ReconcileProvisioning struct {
    // This client, initialized using mgr.Client() above, is a split client
    // that reads objects from the cache and writes to the apiserver
    client client.Client
    scheme *runtime.Scheme
}
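
// Reconcile below branches on whether the request refers to a Provisioning CR or a
// Software CR. For orientation, a minimal Provisioning resource could look roughly like
// the sketch below; the exact YAML field names depend on the json tags in the bpav1alpha1
// package (not shown in this file), so treat this purely as an illustration of the labels
// and spec fields the controller reads (cluster, cluster-type, masters, workers, and the
// optional DHCP lease file / KUD installer / multi-cluster paths):
//
//    apiVersion: <bpa-api-group>/v1alpha1   # group name not shown in this file
//    kind: Provisioning
//    metadata:
//      name: provisioning-sample
//      labels:
//        cluster: cluster-xyz
//        cluster-type: virtlet-vm
//    spec:
//      masters:
//        - master-1:
//            macAddress: "0a:00:00:00:00:01"
//      workers:
//        - worker-1:
//            macAddress: "0a:00:00:00:00:02"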

// Reconcile reads the state of the cluster for a Provisioning object and makes changes based on the state read
// and what is in the Provisioning.Spec
// TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
// a Pod as an example
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
    reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
    //reqLogger.Info("Reconciling Provisioning")
    fmt.Printf("\n\n")
    reqLogger.Info("Reconciling Custom Resource")

    // Fetch the Provisioning instance
    provisioningInstance := &bpav1alpha1.Provisioning{}
    softwareInstance := &bpav1alpha1.Software{}
    err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
    provisioningCreated := true
    if err != nil {

        //Check if it is a Software instance
        err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
        if err != nil {
            if errors.IsNotFound(err) {
                // Request object not found, could have been deleted after reconcile request.
                // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
                // Return and don't requeue
                return reconcile.Result{}, nil
            }

            // Error reading the object - requeue the request.
            return reconcile.Result{}, err
        }

        //No error occurred, so a Software CR was created, not a Provisioning CR
        provisioningCreated = false
    }

    masterTag := "MASTER_"
    workerTag := "WORKER_"

    config, err := config.GetConfig()
    if err != nil {
        fmt.Printf("Could not get kube config, Error: %v\n", err)
        return reconcile.Result{}, err
    }

    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        fmt.Printf("Could not create clientset, Error: %v\n", err)
        return reconcile.Result{}, err
    }
    if provisioningCreated {

        ///////////////////////////////////////////////////////////////////////////////////////////////
        ////////////////         Provisioning CR was created so install KUD          /////////////////
        //////////////////////////////////////////////////////////////////////////////////////////////
        clusterName := provisioningInstance.Labels["cluster"]
        clusterType := provisioningInstance.Labels["cluster-type"]
        mastersList := provisioningInstance.Spec.Masters
        workersList := provisioningInstance.Spec.Workers
        dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile
        kudInstallerScript := provisioningInstance.Spec.KUDInstaller
        multiClusterDir := provisioningInstance.Spec.MultiClusterPath

        bareMetalHostList, _ := listBareMetalHosts(config)
        virtletVMList, _ := listVirtletVMs()

        var allString string
        var masterString string
        var workerString string

        defaultDHCPFile := "/var/lib/dhcp/dhcpd.leases"
        defaultKUDInstallerPath := "/multicloud-k8s/kud/hosting_providers/vagrant"
        defaultMultiClusterDir := "/multi-cluster"

        //Use default values for the paths if no path is given in the CR
        if dhcpLeaseFile == "" {
            dhcpLeaseFile = defaultDHCPFile
        }

        if kudInstallerScript == "" {
            kudInstallerScript = defaultKUDInstallerPath
        }

        if multiClusterDir == "" {
            multiClusterDir = defaultMultiClusterDir
        }

        //Create a directory for the specific cluster
        clusterDir := multiClusterDir + "/" + clusterName
        os.MkdirAll(clusterDir, os.ModePerm)

        //Create maps to be used for the cluster IP-address-to-label configmap
        clusterLabel := make(map[string]string)
        clusterLabel["cluster"] = clusterName
        clusterData := make(map[string]string)

        //Iterate through mastersList and get all the MAC addresses and IP addresses
        for _, masterMap := range mastersList {

            for masterLabel, master := range masterMap {
                masterMAC := master.MACaddress
                hostIPaddress := ""

                if masterMAC == "" {
                    err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
                    return reconcile.Result{}, err
                }

                containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)

                //Check the 'cluster-type' label for Virtlet VMs
                if clusterType == "virtlet-vm" {
                    //Get the VM IP address of the master
                    hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
                    if err != nil || hostIPaddress == "" {
                        err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
                        return reconcile.Result{}, err
                    }
                    containsMac = true
                }

                if containsMac {

                    if clusterType != "virtlet-vm" {
                        fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)

                        //Get the IP address of the master
                        hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile)
                        if err != nil || hostIPaddress == "" {
                            err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
                            return reconcile.Result{}, err
                        }
                    }

                    allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
                    masterString += masterLabel + "\n"
                    clusterData[masterTag+masterLabel] = hostIPaddress

                    fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)

                    if len(workersList) != 0 {

                        //Iterate through workersList and get all the MAC addresses
                        for _, workerMap := range workersList {

                            //Get worker labels from the workerMap
                            for workerLabel, worker := range workerMap {

                                //Check if workerString already contains the worker label
                                containsWorkerLabel := strings.Contains(workerString, workerLabel)
                                workerMAC := worker.MACaddress
                                hostIPaddress = ""

                                //An error occurs if the same label is given to different hosts (assumption:
                                //each MAC address represents a unique host)
                                if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
                                    if containsWorkerLabel {
                                        workerString = strings.ReplaceAll(workerString, workerLabel, "")
                                    }
                                    err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
                                    different label to node with MACAddress %s`, workerLabel, workerMAC)
                                    return reconcile.Result{}, err

                                //The same node performs both the worker and master roles
                                } else if workerLabel == masterLabel && !containsWorkerLabel {
                                    workerString += workerLabel + "\n"

                                    //Add the host to the IP address configmap with the worker tag
                                    hostIPaddress = clusterData[masterTag+masterLabel]
                                    clusterData[workerTag+masterLabel] = hostIPaddress

                                //An error occurs if the same node is given different labels
                                } else if workerLabel != masterLabel && workerMAC == masterMAC {
                                    if containsWorkerLabel {
                                        workerString = strings.ReplaceAll(workerString, workerLabel, "")
                                    }
                                    err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
                                    different label to node with MACAddress %s`, workerLabel, workerMAC)
                                    return reconcile.Result{}, err

                                //The worker node differs from every master node and has not been added to the worker list yet
                                } else if workerLabel != masterLabel && !containsWorkerLabel {

                                    //An error occurs if no MAC address is provided for a worker that does not match a master
                                    if workerMAC == "" {
                                        err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
                                        return reconcile.Result{}, err
                                    }

                                    containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)

                                    if clusterType == "virtlet-vm" {
                                        //Get the VM IP address of the worker
                                        hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
                                        if err != nil || hostIPaddress == "" {
                                            err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
                                            return reconcile.Result{}, err
                                        }
                                        containsMac = true
                                    }

                                    if containsMac {

                                        if clusterType != "virtlet-vm" {
                                            fmt.Printf("Host %s matches that macAddress\n", bmhCR)

                                            //Get the IP address of the worker
                                            hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile)
                                            if err != nil || hostIPaddress == "" {
                                                err = fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
                                                return reconcile.Result{}, err
                                            }
                                        }
                                        fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)

                                        allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
                                        workerString += workerLabel + "\n"
                                        clusterData[workerTag+workerLabel] = hostIPaddress

                                    //No host found that matches the worker MAC
                                    } else {
                                        err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
                                        return reconcile.Result{}, err
                                    }
                                }
                            }
                        }
                    //No worker node specified, add the master as a worker node
                    } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
                        workerString += masterLabel + "\n"

                        //Add the host to the IP address configmap with the worker tag
                        hostIPaddress = clusterData[masterTag+masterLabel]
                        clusterData[workerTag+masterLabel] = hostIPaddress
                    }

                //No host matching the master MAC found
                } else {
                    err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
                    return reconcile.Result{}, err
                }
            }
        }
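
        // For reference, the inventory written below follows the KUD/Kubespray hosts.ini
        // layout built from allString, masterString and workerString. A rough sketch of the
        // generated file for a one-master, one-worker cluster (host names and IPs are
        // illustrative only):
        //
        //    [all]
        //    master-1  ansible_ssh_host=10.10.10.2 ansible_ssh_port=22
        //    worker-1  ansible_ssh_host=10.10.10.3 ansible_ssh_port=22
        //
        //    [kube-master]
        //    master-1
        //
        //    [kube-node]
        //    worker-1
        //
        //    [etcd]
        //    master-1
        //
        //    [k8s-cluster:children]
        //    kube-node
        //    kube-master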

        //Create the hosts.ini file
        //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
        iniHostFilePath := clusterDir + "/hosts.ini"
        newFile, err := os.Create(iniHostFilePath)
        if err != nil {
            fmt.Printf("Error occurred while creating file \n %v", err)
            return reconcile.Result{}, err
        }
        defer newFile.Close()

        hostFile, err := ini.Load(iniHostFilePath)
        if err != nil {
            fmt.Printf("Error occurred while loading file \n %v", err)
            return reconcile.Result{}, err
        }

        _, err = hostFile.NewRawSection("all", allString)
        if err != nil {
            fmt.Printf("Error occurred while creating section \n %v", err)
            return reconcile.Result{}, err
        }
        _, err = hostFile.NewRawSection("kube-master", masterString)
        if err != nil {
            fmt.Printf("Error occurred while creating section \n %v", err)
            return reconcile.Result{}, err
        }

        _, err = hostFile.NewRawSection("kube-node", workerString)
        if err != nil {
            fmt.Printf("Error occurred while creating section \n %v", err)
            return reconcile.Result{}, err
        }

        _, err = hostFile.NewRawSection("etcd", masterString)
        if err != nil {
            fmt.Printf("Error occurred while creating section \n %v", err)
            return reconcile.Result{}, err
        }

        if clusterType == "virtlet-vm" {
            _, err = hostFile.NewRawSection("ovn-central", masterString)
            if err != nil {
                fmt.Printf("Error occurred while creating section \n %v", err)
                return reconcile.Result{}, err
            }
            _, err = hostFile.NewRawSection("ovn-controller", masterString)
            if err != nil {
                fmt.Printf("Error occurred while creating section \n %v", err)
                return reconcile.Result{}, err
            }
        }

        _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
        if err != nil {
            fmt.Printf("Error occurred while creating section \n %v", err)
            return reconcile.Result{}, err
        }

        //Save the hosts.ini file for KUD
        hostFile.SaveTo(iniHostFilePath)

        //Install KUD
        err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, clientset)
        if err != nil {
            fmt.Printf("Error occurred while creating KUD installer job for cluster %v\n ERROR: %v", clusterName, err)
            return reconcile.Result{}, err
        }

        //Start a separate goroutine to keep checking the job status and create an IP address
        //configmap for the cluster if KUD is successfully installed
        go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, clientset)

        return reconcile.Result{}, nil

    }

    ///////////////////////////////////////////////////////////////////////////////////////////////
    ////////////////         Software CR was created so install software         /////////////////
    //////////////////////////////////////////////////////////////////////////////////////////////
    softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
    defaultSSHPrivateKey := "/root/.ssh/id_rsa"

    //Get the IP address configmap for the cluster
    clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, clientset)
    if err != nil {
        fmt.Printf("Error occurred while retrieving IP address data for cluster %s, ERROR: %v\n", softwareClusterName, err)
        return reconcile.Result{}, err
    }

    for hostLabel, ipAddress := range clusterConfigMapData {

        if strings.Contains(hostLabel, masterTag) {
            //It is a master node, install the master software
            err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
            if err != nil {
                fmt.Printf("Error occurred while installing master software in host %s, ERROR: %v\n", hostLabel, err)
            }
        } else if strings.Contains(hostLabel, workerTag) {
            //It is a worker node, install the worker software
            err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
            if err != nil {
                fmt.Printf("Error occurred while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
            }
        }
    }

    return reconcile.Result{}, nil
}

//Function to get the list containing BareMetalHosts
func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, error) {

    //Create a dynamic client for the BareMetalHost CRD
    bmhDynamicClient, err := dynamic.NewForConfig(config)
    if err != nil {
        fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
        return &unstructured.UnstructuredList{}, err
    }

    //Create a GVR representing a BareMetalHost CR
    bmhGVR := schema.GroupVersionResource{
        Group:    "metal3.io",
        Version:  "v1alpha1",
        Resource: "baremetalhosts",
    }

    //Get the list containing all BareMetalHost CRs
    bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
    if err != nil {
        fmt.Printf("Error occurred, cannot get BareMetalHosts list, Error: %v\n", err)
        return &unstructured.UnstructuredList{}, err
    }

    return bareMetalHosts, nil
}

//Function to check if a BareMetalHost containing the MAC address exists
func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {

    //Convert macAddress to a byte array for comparison
    macAddressByte := []byte(macAddress)
    macBool := false

    for _, bareMetalHost := range bareMetalHostList.Items {
        bmhJson, _ := bareMetalHost.MarshalJSON()

        macBool = bytes.Contains(bmhJson, macAddressByte)
        if macBool {
            return macBool, bareMetalHost.GetName()
        }
    }

    return macBool, ""
}

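// getHostIPaddress below scans an ISC dhcpd lease file. As an illustrative sketch only
// (actual lease files vary with the DHCP server configuration), the fields the regex
// pulls out of an entry look like:
//
//    lease 10.10.10.3 {
//      binding state active;
//      hardware ethernet 0a:00:00:00:00:02;
//    }
//
// After the replacements applied below this reduces to the token sequence
// "10.10.10.3 active 0a:00:00:00:00:02", which is why the IP address sits two tokens
// before the matching MAC address and the lease state one token before it.
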
//Function to get the IP address of a host from the DHCP lease file
func getHostIPaddress(macAddress string, dhcpLeaseFilePath string) (string, error) {

    //Read the dhcp lease file
    dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
    if err != nil {
        fmt.Printf("Failed to read lease file\n")
        return "", err
    }

    dhcpLeases := string(dhcpFile)

    //Regex to use to search dhcpLeases
    reg := "lease.*{|ethernet.*|\n. binding state.*"
    re, err := regexp.Compile(reg)
    if err != nil {
        fmt.Printf("Could not create Regexp object, Error %v occurred\n", err)
        return "", err
    }

    //Get a string containing the leased IP addresses and corresponding MAC addresses
    out := re.FindAllString(dhcpLeases, -1)
    outString := strings.Join(out, " ")
    stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
        " binding state", "", "{", "")
    replaced := stringReplacer.Replace(outString)
    ipMacList := strings.Fields(replaced)

    //Get the IP address corresponding to the input MAC address
    for idx := len(ipMacList) - 1; idx >= 0; idx-- {
        item := ipMacList[idx]
        if item == macAddress {

            leaseState := ipMacList[idx-1]
            if leaseState != "active" {
                err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
                fmt.Printf("%v\n", err)
                return "", err
            }
            ipAdd := ipMacList[idx-2]
            return ipAdd, nil
        }
    }
    return "", nil
}

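// The configmap created below stores one entry per cluster host, keyed by the role tag
// plus the node label (the masterTag/workerTag prefixes used in Reconcile). A purely
// illustrative example of the resulting Data for a two-node cluster:
//
//    MASTER_master-1: "10.10.10.2"
//    WORKER_worker-1: "10.10.10.3"
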
//Function to create the configmap
func createConfigMap(data, labels map[string]string, namespace string, clientset *kubernetes.Clientset) error {

    configmapClient := clientset.CoreV1().ConfigMaps(namespace)

    configmap := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Name:   labels["cluster"] + "-configmap",
            Labels: labels,
        },
        Data: data,
    }

    _, err := configmapClient.Create(configmap)
    if err != nil {
        return err
    }
    return nil
}

//Function to get the configmap Data
func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clientset) (map[string]string, error) {

    configmapClient := clientset.CoreV1().ConfigMaps(namespace)
    configmapName := clusterName + "-configmap"
    clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
    if err != nil {
        return nil, err
    }

    configmapData := clusterConfigmap.Data
    return configmapData, nil
}

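// The KUD installer job below mounts a pre-existing Secret named "ssh-key-secret" at /.ssh
// and copies it to /root/.ssh before running the installer, so that secret is expected to
// exist before a Provisioning CR is applied. A hypothetical way to create it (the key file
// name and path are illustrative, not mandated by this code):
//
//    kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa
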
//Function to create the job for KUD installation
func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset *kubernetes.Clientset) error {

    var backOffLimit int32 = 0
    var privi bool = true

    //Create the job in the same namespace that checkJob later polls
    jobClient := clientset.BatchV1().Jobs(namespace)

    job := &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{
            Name:   "kud-" + clusterName,
            Labels: labels,
        },
        Spec: batchv1.JobSpec{
            Template: corev1.PodTemplateSpec{
                ObjectMeta: metav1.ObjectMeta{
                    Labels: labels,
                },
                Spec: corev1.PodSpec{
                    HostNetwork: true,
                    Containers: []corev1.Container{{
                        Name:            "kud",
                        Image:           "github.com/onap/multicloud-k8s:latest",
                        ImagePullPolicy: "IfNotPresent",
                        VolumeMounts: []corev1.VolumeMount{
                            {
                                Name:      "multi-cluster",
                                MountPath: "/opt/kud/multi-cluster",
                            },
                            {
                                Name:      "secret-volume",
                                MountPath: "/.ssh",
                            },
                        },
                        Command: []string{"/bin/sh", "-c"},
                        Args:    []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster " + clusterName},
                        SecurityContext: &corev1.SecurityContext{
                            Privileged: &privi,
                        },
                    }},
                    Volumes: []corev1.Volume{
                        {
                            Name: "multi-cluster",
                            VolumeSource: corev1.VolumeSource{
                                HostPath: &corev1.HostPathVolumeSource{
                                    Path: "/opt/kud/multi-cluster",
                                },
                            },
                        },
                        {
                            Name: "secret-volume",
                            VolumeSource: corev1.VolumeSource{
                                Secret: &corev1.SecretVolumeSource{
                                    SecretName: "ssh-key-secret",
                                },
                            },
                        },
                    },
                    RestartPolicy: "Never",
                },
            },
            BackoffLimit: &backOffLimit,
        },
    }
    _, err := jobClient.Create(job)
    if err != nil {
        fmt.Printf("ERROR occurred while creating job to install KUD\n ERROR:%v", err)
        return err
    }
    return nil
}

//Function to check if the job succeeded
func checkJob(clusterName, namespace string, data, labels map[string]string, clientset *kubernetes.Clientset) {

    fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
    jobName := "kud-" + clusterName
    jobClient := clientset.BatchV1().Jobs(namespace)

    for {
        time.Sleep(2 * time.Second)

        job, err := jobClient.Get(jobName, metav1.GetOptions{})
        if err != nil {
            fmt.Printf("ERROR: %v occurred while retrieving job: %s", err, jobName)
            return
        }
        jobSucceeded := job.Status.Succeeded
        jobFailed := job.Status.Failed

        if jobSucceeded == 1 {
            fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)

            //KUD was installed successfully, create a configmap to store the IP address info for the cluster
            err = createConfigMap(data, labels, namespace, clientset)
            if err != nil {
                fmt.Printf("Error occurred while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
                return
            }
            return
        }

        if jobFailed == 1 {
            fmt.Printf("\n Job failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
            return
        }
    }
}

//Function to get the software list from the software CR
func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {

    CRclusterName := softwareCR.GetLabels()["cluster"]

    masterSoftwareList := softwareCR.Spec.MasterSoftware
    workerSoftwareList := softwareCR.Spec.WorkerSoftware

    return CRclusterName, masterSoftwareList, workerSoftwareList
}

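// softwareInstaller accepts each list entry either as a bare package name (string) or as a
// map of package name to a map holding a "version" key, mirroring the type switch below;
// versioned entries become "name=version" arguments to apt-get. A purely illustrative
// Software CR snippet (the "masterSoftware" field name and package names are assumptions,
// only the "version" key is taken from this code):
//
//    masterSoftware:
//      - curl
//      - docker.io:
//          version: "19.03.6-0ubuntu1~18.04.1"
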
//Function to install software in the cluster hosts
func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {

    var installString string
    for _, software := range softwareList {

        switch t := software.(type) {
        case string:
            installString += software.(string) + " "
        case interface{}:
            softwareMap, errBool := software.(map[string]interface{})
            if !errBool {
                fmt.Printf("Error occurred, cannot install software %v\n", software)
            }
            for softwareName, versionMap := range softwareMap {

                versionMAP, _ := versionMap.(map[string]interface{})
                version := versionMAP["version"].(string)
                installString += softwareName + "=" + version + " "
            }
        default:
            fmt.Printf("invalid format %v\n", t)
        }
    }

    err := sshInstaller(installString, sshPrivateKey, ipAddress)
    if err != nil {
        return err
    }
    return nil
}

//Function to run the installation commands via ssh
func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {

    buffer, err := ioutil.ReadFile(sshPrivateKey)
    if err != nil {
        return err
    }

    key, err := ssh.ParsePrivateKey(buffer)
    if err != nil {
        return err
    }

    sshConfig := &ssh.ClientConfig{
        User: "root",
        Auth: []ssh.AuthMethod{
            ssh.PublicKeys(key),
        },

        HostKeyCallback: ssh.InsecureIgnoreHostKey(),
    }

    client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
    if err != nil {
        return err
    }

    session, err := client.NewSession()
    if err != nil {
        return err
    }

    defer session.Close()
    defer client.Close()

    cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
    err = session.Start(cmd)
    if err != nil {
        return err
    }

    return nil
}

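// listVirtletVMs lists pods across all namespaces and keeps those annotated with
// "kubernetes.io/target-runtime: virtlet.cloud" that are in the Running phase, reading
// each VM's MAC address from the NetworksStatus entries in its
// "v1.multus-cni.io/default-network" annotation and pairing it with the pod IP.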
func listVirtletVMs() ([]VirtletVM, error) {

    var vmPodList []VirtletVM

    config, err := config.GetConfig()
    if err != nil {
        fmt.Printf("Could not get kube config, Error: %v\n", err)
        return []VirtletVM{}, err
    }

    // create the clientset
    clientset, err := kubernetes.NewForConfig(config)
    if err != nil {
        fmt.Printf("Could not create the client set, Error: %v\n", err)
        return []VirtletVM{}, err
    }

    pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
    if err != nil {
        fmt.Printf("Could not get pod info, Error: %v\n", err)
        return []VirtletVM{}, err
    }

    for _, pod := range pods.Items {
        var podAnnotation map[string]interface{}
        var podStatus corev1.PodStatus
        var podDefaultNetStatus []NetworksStatus

        annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
        if err != nil {
            fmt.Printf("Could not get pod annotations, Error: %v\n", err)
            return []VirtletVM{}, err
        }

        json.Unmarshal([]byte(annotation), &podAnnotation)
        if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
            runtimeName := podAnnotation["kubernetes.io/target-runtime"].(string)

            podStatusJson, _ := json.Marshal(pod.Status)
            json.Unmarshal([]byte(podStatusJson), &podStatus)

            if runtimeName == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
                ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
                json.Unmarshal([]byte(ns), &podDefaultNetStatus)

                //Guard against an empty network status list before reading the MAC address
                if len(podDefaultNetStatus) > 0 {
                    vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
                }
            }
        }
    }

    return vmPodList, nil
}

func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {

    for i := 0; i < len(vmList); i++ {
        if vmList[i].MACaddress == macAddress {
            return vmList[i].IPaddress, nil
        }
    }
    return "", nil
}