Modified BPA controller and fixed bugs
[icn.git] / cmd / bpa-operator / pkg / controller / provisioning / provisioning_controller.go
1 package provisioning
2
3 import (
4         "context"
5         "os"
6         "fmt"
7         "time"
8         "bytes"
9         "regexp"
10         "strings"
11         "io/ioutil"
12         "encoding/json"
13
14         bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
15         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16         corev1 "k8s.io/api/core/v1"
17         batchv1 "k8s.io/api/batch/v1"
18         "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19         "k8s.io/apimachinery/pkg/runtime/schema"
20         "k8s.io/apimachinery/pkg/api/errors"
21         "k8s.io/apimachinery/pkg/runtime"
22         "k8s.io/apimachinery/pkg/types"
23         "k8s.io/client-go/dynamic"
24
25         "sigs.k8s.io/controller-runtime/pkg/client"
26         "sigs.k8s.io/controller-runtime/pkg/client/config"
27         "sigs.k8s.io/controller-runtime/pkg/controller"
28         "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
29         "sigs.k8s.io/controller-runtime/pkg/handler"
30         "sigs.k8s.io/controller-runtime/pkg/manager"
31         "sigs.k8s.io/controller-runtime/pkg/reconcile"
32         logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
33         "sigs.k8s.io/controller-runtime/pkg/source"
34         "gopkg.in/ini.v1"
35         "golang.org/x/crypto/ssh"
36 )
37
// VirtletVM pairs the IP address of a Virtlet VM with the MAC address of its
// network interface. Used to resolve host IPs when the provisioning CR has
// cluster-type "virtlet-vm" instead of bare-metal hosts.
type VirtletVM struct {
	IPaddress  string
	MACaddress string
}
42
// NetworksStatus is one entry of a pod network-status list — presumably the
// CNI/Multus "networks-status" pod annotation, judging by the json tags
// (TODO confirm against listVirtletVMs). It is used to extract a VM's IP
// and MAC addresses.
type NetworksStatus struct {
	Name      string      `json:"name,omitempty"`      // network attachment name
	Interface string      `json:"interface,omitempty"` // interface name inside the pod
	Ips       []string    `json:"ips,omitempty"`       // IP addresses assigned on this network
	Mac       string      `json:"mac,omitempty"`       // MAC address of the interface
	Default   bool        `json:"default,omitempty"`   // whether this is the default network
	Dns       interface{} `json:"dns,omitempty"`       // DNS config; shape not used here
}
51
52 var log = logf.Log.WithName("controller_provisioning")
53
/*
 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify
 * with their own Controller business logic. Delete these comments after
 * modifying this file.
 */
58
59 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
60 // and Start it when the Manager is Started.
61 func Add(mgr manager.Manager) error {
62         return add(mgr, newReconciler(mgr))
63 }
64
65 // newReconciler returns a new reconcile.Reconciler
66 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
67
68         config, err :=  config.GetConfig()
69         if err != nil {
70            fmt.Printf("Could not get kube config, Error: %v\n", err)
71         }
72
73        bmhDynamicClient, err := dynamic.NewForConfig(config)
74
75        if err != nil {
76           fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
77        }
78
79        return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), bmhClient: bmhDynamicClient }
80 }
81
82 // add adds a new Controller to mgr with r as the reconcile.Reconciler
83 func add(mgr manager.Manager, r reconcile.Reconciler) error {
84         // Create a new controller
85         c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
86         if err != nil {
87                 return err
88         }
89
90         // Watch for changes to primary resource Provisioning
91         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
92         if err != nil {
93                 return err
94         }
95
96         // Watch for changes to resource configmap created as a consequence of the provisioning CR
97         err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
98                 IsController: true,
99                 OwnerType:   &bpav1alpha1.Provisioning{},
100         })
101
102         if err != nil {
103                 return err
104         }
105
106        //Watch for changes to job resource also created as a consequence of the provisioning CR
107        err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
108                 IsController: true,
109                 OwnerType:   &bpav1alpha1.Provisioning{},
110         })
111
112         if err != nil {
113                 return err
114         }
115
116         // Watch for changes to resource software CR
117         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
118         if err != nil {
119                 return err
120         }
121
122
123         return nil
124 }
125
126 // blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
127 var _ reconcile.Reconciler = &ReconcileProvisioning{}
128
129 // ReconcileProvisioning reconciles a Provisioning object
130 type ReconcileProvisioning struct {
131         // This client, initialized using mgr.Client() above, is a split client
132         // that reads objects from the cache and writes to the apiserver
133         client client.Client
134         scheme *runtime.Scheme
135         bmhClient dynamic.Interface
136 }
137
138 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
139 // and what is in the Provisioning.Spec
140 // TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
141 // a Pod as an example
142 // Note:
143 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
144 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
145 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
146         reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
147         fmt.Printf("\n\n")
148         reqLogger.Info("Reconciling Custom Resource")
149
150
151
152         // Fetch the Provisioning instance
153         provisioningInstance := &bpav1alpha1.Provisioning{}
154         softwareInstance := &bpav1alpha1.Software{}
155         err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
156         provisioningCreated := true
157         if err != nil {
158
159                          //Check if its a Software Instance
160                          err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
161                          if err != nil {
162                              if errors.IsNotFound(err) {
163                                 // Request object not found, could have been deleted after reconcile request.
164                                 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
165                                 // Return and don't requeue
166                                 return reconcile.Result{}, nil
167                              }
168
169                          // Error reading the object - requeue the request.
170                          return reconcile.Result{}, err
171                          }
172
173                          //No error occured and so a Software CR was created not a Provisoning CR
174                          provisioningCreated = false
175         }
176
177
178         masterTag := "MASTER_"
179         workerTag := "WORKER_"
180
181         if provisioningCreated {
182
183         ///////////////////////////////////////////////////////////////////////////////////////////////
184         ////////////////         Provisioning CR was created so install KUD          /////////////////
185         //////////////////////////////////////////////////////////////////////////////////////////////
186         provisioningVersion := provisioningInstance.ResourceVersion
187         clusterName := provisioningInstance.Labels["cluster"]
188         clusterType := provisioningInstance.Labels["cluster-type"]
189         mastersList := provisioningInstance.Spec.Masters
190         workersList := provisioningInstance.Spec.Workers
191
192
193         bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
194         virtletVMList, _ := listVirtletVMs(r.client)
195
196
197
198         var allString string
199         var masterString string
200         var workerString string
201
202         dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
203         multiClusterDir := "/multi-cluster"
204
205         //Create Directory for the specific cluster
206         clusterDir := multiClusterDir + "/" + clusterName
207         os.MkdirAll(clusterDir, os.ModePerm)
208
209         //Create Maps to be used for cluster ip address to label configmap
210         clusterData := make(map[string]string)
211         clusterMACData := make(map[string]string)
212
213
214
215        //Iterate through mastersList and get all the mac addresses and IP addresses
216        for _, masterMap := range mastersList {
217
218                 for masterLabel, master := range masterMap {
219                    masterMAC := master.MACaddress
220                    hostIPaddress := ""
221
222                    if masterMAC == "" {
223                       err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
224                       return reconcile.Result{}, err
225                    }
226
227
228                    // Check if master MAC address has already been used
229                    usedMAC, err := r.macAddressUsed(provisioningInstance.Namespace, masterMAC, clusterName)
230
231
232                    if err != nil {
233
234                       fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
235                       return reconcile.Result{}, err
236                    }
237
238                    if usedMAC {
239
240                       err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", masterMAC)
241                       return reconcile.Result{}, err
242
243                    }
244
245                    // Check if Baremetal host with specified MAC address exist
246                    containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
247
248                    //Check 'cluster-type' label for Virtlet VMs
249                    if clusterType == "virtlet-vm" {
250                        //Get VM IP address of master
251                        hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
252                        if err != nil || hostIPaddress == "" {
253                            err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
254                            return reconcile.Result{}, err
255                        }
256                        containsMac = true
257                    }
258
259                    if containsMac {
260
261                        if clusterType != "virtlet-vm" {
262                            fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
263
264                            //Get IP address of master
265                            hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile )
266                            if err != nil || hostIPaddress == ""{
267                                err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
268                                return reconcile.Result{}, err
269                            }
270                        }
271
272                        allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
273                        if clusterType == "virtlet-vm" {
274                            allString = masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
275                        }
276                        masterString += masterLabel + "\n"
277                        clusterData[masterTag + masterLabel] = hostIPaddress
278                        clusterMACData[strings.ReplaceAll(masterMAC, ":", "-")] = masterTag + masterLabel
279
280                        fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
281
282
283                        // Check if any worker MAC address was specified
284                        if len(workersList) != 0 {
285
286                            //Iterate through workersList and get all the mac addresses
287                            for _, workerMap := range workersList {
288
289                                //Get worker labels from the workermap
290                                for workerLabel, worker := range workerMap {
291
292                                    //Check if workerString already contains worker label
293                                    containsWorkerLabel := strings.Contains(workerString, workerLabel)
294                                    workerMAC := worker.MACaddress
295                                    hostIPaddress = ""
296
297                                    //Error occurs if the same label is given to different hosts (assumption,
298                                    //each MAC address represents a unique host
299                                    if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
300                                      if containsWorkerLabel {
301                                             strings.ReplaceAll(workerString, workerLabel, "")
302                                          }
303                                       err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
304                                       different label to node with MACAddress %s`, workerLabel, workerMAC)
305                                       return reconcile.Result{}, err
306
307                                    //same node performs worker and master roles
308                                    } else if workerLabel == masterLabel && !containsWorkerLabel {
309                                         workerString += workerLabel + "\n"
310
311                                         //Add host to ip address config map with worker tag
312                                         hostIPaddress = clusterData[masterTag + masterLabel]
313                                         clusterData[workerTag + masterLabel] = hostIPaddress
314
315                                    //Error occurs if the same node is given different labels
316                                    } else if workerLabel != masterLabel && workerMAC == masterMAC {
317                                          if containsWorkerLabel {
318                                             strings.ReplaceAll(workerString, workerLabel, "")
319                                          }
320                                          err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
321                                                         different label to node with MACAddress %s`, workerLabel, workerMAC)
322                                          return reconcile.Result{}, err
323
324                                    //worker node is different from any master node and it has not been added to the worker list
325                                    } else if workerLabel != masterLabel && !containsWorkerLabel {
326
327                                         // Error occurs if MAC address not provided for worker node not matching master
328                                         if workerMAC == "" {
329                                           err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
330                                           return reconcile.Result{}, err
331                                          }
332
333                                         // Check if worker MAC address has already been used
334                                         usedMAC, err = r.macAddressUsed(provisioningInstance.Namespace, workerMAC, clusterName)
335
336                                         if err != nil {
337
338                                            fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
339                                            return reconcile.Result{}, err
340                                         }
341
342                                         if usedMAC {
343
344                                            err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", workerMAC)
345                                            return reconcile.Result{}, err
346
347                                         }
348
349                                         containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
350
351                                         if clusterType == "virtlet-vm" {
352                                             //Get VM IP address of master
353                                             hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
354                                             if err != nil || hostIPaddress == "" {
355                                                 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
356                                                 return reconcile.Result{}, err
357                                             }
358                                             containsMac = true
359                                         }
360
361                                         if containsMac{
362
363                                            if clusterType != "virtlet-vm" {
364                                                fmt.Printf("Host %s matches that macAddress\n", bmhCR)
365
366                                                //Get IP address of worker
367                                                hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile )
368                                                if err != nil {
369                                                    fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
370                                                    return reconcile.Result{}, err
371                                                }
372                                            }
373                                            fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
374
375                                            allString += workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
376                                            if clusterType == "virtlet-vm" {
377                                                allString = masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
378                                            }
379                                            workerString += workerLabel + "\n"
380                                            clusterData[workerTag + workerLabel] = hostIPaddress
381                                            clusterMACData[strings.ReplaceAll(workerMAC, ":", "-")] = workerTag + workerLabel
382
383                                        //No host found that matches the worker MAC
384                                        } else {
385
386                                             err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
387                                             return reconcile.Result{}, err
388                                           }
389                                    }
390                              }
391                        }
392                    //No worker node specified, add master as worker node
393                    } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
394                        workerString += masterLabel + "\n"
395
396                        //Add host to ip address config map with worker tag
397                        hostIPaddress = clusterData[masterTag + masterLabel]
398                        clusterData[workerTag + masterLabel] = hostIPaddress
399                    }
400
401                    //No host matching master MAC found
402                    } else {
403                       err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
404                       return reconcile.Result{}, err
405                    }
406              }
407         }
408
409         //Create host.ini file
410         iniHostFilePath := clusterDir + "/hosts.ini"
411         newFile, err := os.Create(iniHostFilePath)
412         defer newFile.Close()
413
414
415         if err != nil {
416            fmt.Printf("Error occured while creating file \n %v", err)
417            return reconcile.Result{}, err
418         }
419
420         hostFile, err := ini.Load(iniHostFilePath)
421         if err != nil {
422            fmt.Printf("Error occured while Loading file \n %v", err)
423            return reconcile.Result{}, err
424         }
425
426         _, err = hostFile.NewRawSection("all", allString)
427         if err != nil {
428            fmt.Printf("Error occured while creating section \n %v", err)
429            return reconcile.Result{}, err
430         }
431         _, err = hostFile.NewRawSection("kube-master", masterString)
432         if err != nil {
433            fmt.Printf("Error occured while creating section \n %v", err)
434            return reconcile.Result{}, err
435         }
436
437         _, err = hostFile.NewRawSection("kube-node", workerString)
438         if err != nil {
439            fmt.Printf("Error occured while creating section \n %v", err)
440            return reconcile.Result{}, err
441         }
442
443         _, err = hostFile.NewRawSection("etcd", masterString)
444         if err != nil {
445            fmt.Printf("Error occured while creating section \n %v", err)
446            return reconcile.Result{}, err
447         }
448
449         _, err = hostFile.NewRawSection("ovn-central", masterString)
450         if err != nil {
451            fmt.Printf("Error occured while creating section \n %v", err)
452            return reconcile.Result{}, err
453         }
454
455         _, err = hostFile.NewRawSection("ovn-controller", workerString)
456         if err != nil {
457            fmt.Printf("Error occured while creating section \n %v", err)
458            return reconcile.Result{}, err
459         }
460
461         _, err = hostFile.NewRawSection("virtlet", workerString)
462         if err != nil {
463            fmt.Printf("Error occured while creating section \n %v", err)
464            return reconcile.Result{}, err
465         }
466
467         _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
468         if err != nil {
469            fmt.Printf("Error occured while creating section \n %v", err)
470            return reconcile.Result{}, err
471         }
472
473
474         //Create host.ini file for KUD
475         hostFile.SaveTo(iniHostFilePath)
476
477         // Create configmap to store MAC address info for the cluster
478         cmName := provisioningInstance.Labels["cluster"] + "-mac-addresses"
479         foundConfig := &corev1.ConfigMap{}
480         err = r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: provisioningInstance.Namespace}, foundConfig)
481
482         // Configmap was found but the provisioning CR was updated so update configMap
483         if err == nil && foundConfig.Labels["provisioning-version"] != provisioningVersion {
484
485            foundConfig.Data = clusterMACData
486            foundConfig.Labels =  provisioningInstance.Labels
487            foundConfig.Labels["configmap-type"] = "mac-address"
488            foundConfig.Labels["provisioning-version"] = provisioningVersion
489            err = r.client.Update(context.TODO(), foundConfig)
490            if err != nil {
491               fmt.Printf("Error occured while updating mac address configmap for provisioningCR %s\n ERROR: %v\n", provisioningInstance.Name,
492               err)
493               return reconcile.Result{}, err
494            }
495
496         }  else if err != nil && errors.IsNotFound(err) {
497            labels :=  provisioningInstance.Labels
498            labels["configmap-type"] = "mac-address"
499            labels["provisioning-version"] = provisioningVersion
500            err = r.createConfigMap(provisioningInstance, clusterMACData, labels, cmName)
501            if err != nil {
502               fmt.Printf("Error occured while creating MAC address configmap for cluster %v\n ERROR: %v", clusterName, err)
503               return reconcile.Result{}, err
504             }
505
506         } else if err != nil {
507             fmt.Printf("ERROR occured in Create MAC address Config map section: %v\n", err)
508             return reconcile.Result{}, err
509           }
510
511         //Install KUD
512         err = r.createKUDinstallerJob(provisioningInstance)
513         if err != nil {
514            fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
515            return reconcile.Result{}, err
516         }
517
518         //Start separate thread to keep checking job status, Create an IP address configmap
519         //for cluster if KUD is successfully installed
520         go r.checkJob(provisioningInstance, clusterData)
521
522         return reconcile.Result{}, nil
523
524        }
525
526
527
528         ///////////////////////////////////////////////////////////////////////////////////////////////
529         ////////////////         Software CR was created so install software         /////////////////
530         //////////////////////////////////////////////////////////////////////////////////////////////
531         softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
532         defaultSSHPrivateKey := "/root/.ssh/id_rsa"
533
534         //Get IP address configmap for the cluster
535         configmapName :=  softwareInstance.Labels["cluster"] + "-configmap"
536         clusterConfigMapData, err := r.getConfigMapData(softwareInstance.Namespace, configmapName)
537         if err != nil {
538            fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
539            return reconcile.Result{}, err
540         }
541
542         for hostLabel, ipAddress := range clusterConfigMapData {
543
544             if strings.Contains(hostLabel, masterTag) {
545                // Its a master node, install master software
546                err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
547                if err != nil {
548                   fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
549                }
550             } else if strings.Contains(hostLabel, workerTag) {
551               // Its a worker node, install worker software
552               err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
553               if err != nil {
554                   fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
555                }
556
557             }
558
559         }
560
561         return reconcile.Result{}, nil
562 }
563
564 //Function to Get List containing baremetal hosts
565 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
566
567     //Create GVR representing a BareMetalHost CR
568     bmhGVR := schema.GroupVersionResource{
569       Group:    "metal3.io",
570       Version:  "v1alpha1",
571       Resource: "baremetalhosts",
572     }
573
574     //Get List containing all BareMetalHosts CRs
575     bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
576     if err != nil {
577        fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
578        return &unstructured.UnstructuredList{}, err
579     }
580
581     return bareMetalHosts, nil
582 }
583
584
585 //Function to check if BareMetalHost containing MAC address exist
586 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
587
588      //Convert macAddress to byte array for comparison
589      macAddressByte :=  []byte(macAddress)
590      macBool := false
591
592      for _, bareMetalHost := range bareMetalHostList.Items {
593          bmhJson, _ := bareMetalHost.MarshalJSON()
594
595          macBool = bytes.Contains(bmhJson, macAddressByte)
596          if macBool{
597              return macBool, bareMetalHost.GetName()
598          }
599
600       }
601
602          return macBool, ""
603
604 }
605
606 //Function to check if MAC address has been used in another provisioning CR
607 //Returns true if MAC address has already been used
608 func (r *ReconcileProvisioning) macAddressUsed(namespace, macAddress, provisioningCluster string) (bool, error) {
609
610      macKey := strings.ReplaceAll(macAddress, ":", "-")
611
612      //Get configmap List
613      cmList := &corev1.ConfigMapList{}
614      label := map[string]string{"configmap-type": "mac-address"}
615      listOpts :=  client.MatchingLabels(label)
616
617      err := r.client.List(context.TODO(), listOpts, cmList)
618      if err != nil {
619         return false, err
620      }
621
622      for _, configmap := range cmList.Items {
623
624         cmCluster := configmap.Labels["cluster"]
625         if cmCluster != provisioningCluster {
626             cmData, err := r.getConfigMapData(namespace, configmap.Name)
627             if err != nil {
628                return false, err
629
630             }
631
632             if _, exist := cmData[macKey]; exist {
633
634                return exist, nil
635             }
636        }
637
638      }
639
640      return false, nil
641
642 }
643
644
645
// getHostIPaddress looks up the IP address leased to macAddress in an ISC
// dhcpd lease file.
//
// It extracts the "lease <ip> {", "binding state <state>" and
// "hardware ethernet <mac>" fields of every lease, reducing the file to
// repeated "<ip> <state> <mac>" triples, then scans from the end so the most
// recent lease for the MAC wins. It returns ("", err) on read/parse failure
// or when the newest lease for the MAC is not active, and ("", nil) when the
// MAC has no lease at all.
func getHostIPaddress(macAddress string, dhcpLeaseFilePath string) (string, error) {

	// Read the dhcp lease file
	dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
	if err != nil {
		fmt.Printf("Failed to read lease file\n")
		return "", err
	}

	dhcpLeases := string(dhcpFile)

	// Regex to use to search dhcpLeases
	reg := "lease.*{|ethernet.*|\n. binding state.*"
	re, err := regexp.Compile(reg)
	if err != nil {
		fmt.Printf("Could not create Regexp object, Error %v occured\n", err)
		return "", err
	}

	// Get string containing leased IP addresses and corresponding MAC addresses,
	// then strip the keywords/punctuation so only the ip/state/mac fields remain.
	out := re.FindAllString(dhcpLeases, -1)
	outString := strings.Join(out, " ")
	stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
		" binding state", "", "{", "")
	replaced := stringReplacer.Replace(outString)
	ipMacList := strings.Fields(replaced)

	// Get IP address corresponding to the input MAC address.
	// BUG FIX: the loop previously ran down to idx 0, so a MAC appearing at
	// index 0 or 1 of a malformed lease file caused an index-out-of-range
	// panic on ipMacList[idx-1] / ipMacList[idx-2]; idx >= 2 guards that.
	for idx := len(ipMacList) - 1; idx >= 2; idx-- {
		if ipMacList[idx] != macAddress {
			continue
		}

		leaseState := ipMacList[idx-1]
		if leaseState != "active" {
			err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
			fmt.Printf("%v\n", err)
			return "", err
		}
		return ipMacList[idx-2], nil
	}

	// MAC not present in the lease file.
	return "", nil
}
693
694 //Function to create configmap
695 func (r *ReconcileProvisioning) createConfigMap(p *bpav1alpha1.Provisioning, data, labels map[string]string, cmName string) error{
696
697          // Configmap has not been created, create it
698           configmap := &corev1.ConfigMap{
699
700               ObjectMeta: metav1.ObjectMeta{
701                         Name: cmName,
702                         Namespace: p.Namespace,
703                         Labels: labels,
704                       },
705               Data: data,
706           }
707
708
709          // Set provisioning instance as the owner of the job
710          controllerutil.SetControllerReference(p, configmap, r.scheme)
711          err := r.client.Create(context.TODO(), configmap)
712          if err != nil {
713              return err
714
715          }
716
717      return nil
718
719 }
720
721 //Function to get configmap Data
722 func (r *ReconcileProvisioning) getConfigMapData(namespace, configmapName string) (map[string]string, error) {
723
724      clusterConfigmap := &corev1.ConfigMap{}
725      err := r.client.Get(context.TODO(), types.NamespacedName{Name: configmapName, Namespace: namespace}, clusterConfigmap)
726      if err != nil {
727         return nil, err
728      }
729
730      configmapData := clusterConfigmap.Data
731      return configmapData, nil
732 }
733
734 //Function to create job for KUD installation
735 func (r *ReconcileProvisioning) createKUDinstallerJob(p *bpav1alpha1.Provisioning) error{
736
737     var backOffLimit int32 = 0
738     var privi bool = true
739
740
741     kudPlugins := p.Spec.KUDPlugins
742
743     jobLabel := p.Labels
744     jobLabel["provisioning-version"] = p.ResourceVersion
745     jobName := "kud-" + jobLabel["cluster"]
746
747     // Check if the job already exist
748     foundJob := &batchv1.Job{}
749     err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, foundJob)
750
751     if (err == nil && foundJob.Labels["provisioning-version"] != jobLabel["provisioning-version"]) || (err != nil && errors.IsNotFound(err)) {
752
753        // If err == nil and its in this statement, then provisioning CR was updated, delete job
754        if err == nil {
755           err = r.client.Delete(context.TODO(), foundJob, client.PropagationPolicy(metav1.DeletePropagationForeground))
756           if err != nil {
757              fmt.Printf("Error occured while deleting kud install job for updated provisioning CR %v\n", err)
758              return err
759           }
760        }
761
762        // Job has not been created, create a new kud job
763        installerString := " ./installer --cluster " + p.Labels["cluster"]
764
765        // Check if any plugin was specified
766        if len(kudPlugins) > 0 {
767             plugins := " --plugins"
768
769             for _, plug := range kudPlugins {
770                 plugins += " " + plug
771             }
772
773            installerString += plugins
774        }
775
776        // Define new job
777        job := &batchv1.Job{
778
779         ObjectMeta: metav1.ObjectMeta{
780                        Name: jobName,
781                        Namespace: p.Namespace,
782                        Labels: jobLabel,
783                 },
784                 Spec: batchv1.JobSpec{
785                       Template: corev1.PodTemplateSpec{
786                                 ObjectMeta: metav1.ObjectMeta{
787                                         Labels: p.Labels,
788                                 },
789
790
791                         Spec: corev1.PodSpec{
792                               HostNetwork: true,
793                               Containers: []corev1.Container{{
794                                           Name: "kud",
795                                           Image: "github.com/onap/multicloud-k8s:latest",
796                                           ImagePullPolicy: "IfNotPresent",
797                                           VolumeMounts: []corev1.VolumeMount{{
798                                                         Name: "multi-cluster",
799                                                         MountPath: "/opt/kud/multi-cluster",
800                                                         },
801                                                         {
802                                                         Name: "secret-volume",
803                                                         MountPath: "/.ssh",
804                                                         },
805
806                                            },
807                                            Command: []string{"/bin/sh","-c"},
808                                            Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
809                                            SecurityContext: &corev1.SecurityContext{
810                                                             Privileged : &privi,
811
812                                            },
813                                           },
814                                  },
815                                  Volumes: []corev1.Volume{{
816                                           Name: "multi-cluster",
817                                           VolumeSource: corev1.VolumeSource{
818                                                        HostPath: &corev1.HostPathVolumeSource{
819                                                               Path : "/opt/kud/multi-cluster",
820                                                      }}},
821                                           {
822                                           Name: "secret-volume",
823                                           VolumeSource: corev1.VolumeSource{
824                                                         Secret: &corev1.SecretVolumeSource{
825                                                               SecretName: "ssh-key-secret",
826                                                         },
827
828                                           }}},
829                                  RestartPolicy: "Never",
830                               },
831
832                              },
833                              BackoffLimit : &backOffLimit,
834                              },
835
836        }
837
838        // Set provisioning instance as the owner and controller of the job
839        controllerutil.SetControllerReference(p, job, r.scheme)
840        err = r.client.Create(context.TODO(), job)
841        if err != nil {
842           fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
843           return err
844           }
845
846     } else if err != nil {
847          return err
848     }
849
850    return nil
851
852 }
853
854
855 //Function to Check if job succeeded
856 func (r *ReconcileProvisioning) checkJob(p *bpav1alpha1.Provisioning, data map[string]string) {
857
858      clusterName := p.Labels["cluster"]
859      fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
860      jobName := "kud-" + clusterName
861      job := &batchv1.Job{}
862
863      for {
864
865          err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, job)
866          if err != nil {
867             fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
868             return
869          }
870          jobSucceeded := job.Status.Succeeded
871          jobFailed := job.Status.Failed
872
873          if jobSucceeded == 1 {
874             fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
875
876             //KUD was installed successfully create configmap to store IP address info for the cluster
877             labels := p.Labels
878             labels["provisioning-version"] = p.ResourceVersion
879             cmName := labels["cluster"] + "-configmap"
880             foundConfig := &corev1.ConfigMap{}
881             err := r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: p.Namespace}, foundConfig)
882
883             // Check if provisioning CR was updated
884             if err == nil && foundConfig.Labels["provisioning-version"] != labels["provisioning-version"] {
885
886                foundConfig.Data = data
887                foundConfig.Labels =  labels
888                err = r.client.Update(context.TODO(), foundConfig)
889                if err != nil {
890                   fmt.Printf("Error occured while updating IP address configmap for provisioningCR %s\n ERROR: %v\n", p.Name, err)
891                   return
892                 }
893
894             } else if err != nil && errors.IsNotFound(err) {
895                err = r.createConfigMap(p, data, labels, cmName)
896                if err != nil {
897                   fmt.Printf("Error occured while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
898                   return
899                }
900                return
901
902             } else if err != nil {
903               fmt.Printf("ERROR occured while checking if IP address configmap %v already exists: %v\n", cmName, err)
904               return
905               }
906
907
908            return
909          }
910
911         if jobFailed == 1 {
912            fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
913
914            return
915         }
916
917         time.Sleep(5 * time.Second)
918      }
919     return
920
921 }
922 //Function to get software list from software CR
923 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
924
925      CRclusterName := softwareCR.GetLabels()["cluster"]
926
927      masterSofwareList := softwareCR.Spec.MasterSoftware
928      workerSoftwareList := softwareCR.Spec.WorkerSoftware
929
930      return CRclusterName, masterSofwareList, workerSoftwareList
931 }
932
933 //Function to install software in clusterHosts
934 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
935
936      var installString string
937      for _, software := range softwareList {
938
939         switch t := software.(type){
940         case string:
941              installString += software.(string) + " "
942         case interface{}:
943              softwareMap, errBool := software.(map[string]interface{})
944              if !errBool {
945                 fmt.Printf("Error occured, cannot install software %v\n", software)
946              }
947              for softwareName, versionMap := range softwareMap {
948
949                  versionMAP, _ := versionMap.(map[string]interface{})
950                  version := versionMAP["version"].(string)
951                  installString += softwareName + "=" + version + " "
952              }
953         default:
954             fmt.Printf("invalid format %v\n", t)
955         }
956
957      }
958
959      err := sshInstaller(installString, sshPrivateKey, ipAddress)
960      if err != nil {
961         return err
962      }
963      return nil
964
965 }
966
967 //Function to Run Installation commands via ssh
968 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
969
970      buffer, err := ioutil.ReadFile(sshPrivateKey)
971      if err != nil {
972         return err
973      }
974
975      key, err := ssh.ParsePrivateKey(buffer)
976      if err != nil {
977         return err
978      }
979
980      sshConfig := &ssh.ClientConfig{
981         User: "root",
982         Auth: []ssh.AuthMethod{
983               ssh.PublicKeys(key),
984      },
985
986      HostKeyCallback: ssh.InsecureIgnoreHostKey(),
987      }
988
989     client, err := ssh.Dial("tcp", ipAddress + ":22", sshConfig)
990     if err != nil {
991        return err
992     }
993
994     session, err := client.NewSession()
995     if err != nil {
996        return err
997     }
998
999     defer session.Close()
1000     defer client.Close()
1001
1002     cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
1003     err = session.Start(cmd)
1004
1005     if err != nil {
1006        return err
1007     }
1008
1009     return nil
1010
1011 }
1012
1013
1014 // List virtlet VMs
1015 func listVirtletVMs(vmClient client.Client) ([]VirtletVM, error) {
1016
1017         var vmPodList []VirtletVM
1018
1019         pods := &corev1.PodList{}
1020         err := vmClient.List(context.TODO(), &client.ListOptions{}, pods)
1021         if err != nil {
1022                 fmt.Printf("Could not get pod info, Error: %v\n", err)
1023                 return []VirtletVM{}, err
1024         }
1025
1026         for _, pod := range pods.Items {
1027                 var podAnnotation map[string]interface{}
1028                 var podStatus corev1.PodStatus
1029                 var podDefaultNetStatus []NetworksStatus
1030
1031                 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
1032                 if err != nil {
1033                         fmt.Printf("Could not get pod annotations, Error: %v\n", err)
1034                         return []VirtletVM{}, err
1035                 }
1036
1037                 json.Unmarshal([]byte(annotation), &podAnnotation)
1038                 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
1039                         runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
1040
1041                         podStatusJson, _ := json.Marshal(pod.Status)
1042                         json.Unmarshal([]byte(podStatusJson), &podStatus)
1043
1044                         if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
1045                                 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
1046                                 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
1047
1048                                 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
1049                         }
1050                 }
1051         }
1052
1053         return vmPodList, nil
1054 }
1055
1056 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
1057
1058         for i := 0; i < len(vmList); i++ {
1059                 if vmList[i].MACaddress == macAddress {
1060                         return vmList[i].IPaddress, nil
1061                 }
1062         }
1063         return "", nil
1064 }