Run go fmt over bpa-operator
[icn.git] / cmd / bpa-operator / pkg / controller / provisioning / provisioning_controller.go
1 package provisioning
2
3 import (
4         "bytes"
5         "context"
6         "encoding/json"
7         "fmt"
8         "io/ioutil"
9         "os"
10         "regexp"
11         "strings"
12         "time"
13
14         bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
15         batchv1 "k8s.io/api/batch/v1"
16         corev1 "k8s.io/api/core/v1"
17         "k8s.io/apimachinery/pkg/api/errors"
18         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
19         "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
20         "k8s.io/apimachinery/pkg/runtime"
21         "k8s.io/apimachinery/pkg/runtime/schema"
22         "k8s.io/client-go/dynamic"
23
24         "golang.org/x/crypto/ssh"
25         "gopkg.in/ini.v1"
26         "k8s.io/client-go/kubernetes"
27         "sigs.k8s.io/controller-runtime/pkg/client"
28         "sigs.k8s.io/controller-runtime/pkg/client/config"
29         "sigs.k8s.io/controller-runtime/pkg/controller"
30         "sigs.k8s.io/controller-runtime/pkg/handler"
31         "sigs.k8s.io/controller-runtime/pkg/manager"
32         "sigs.k8s.io/controller-runtime/pkg/reconcile"
33         logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
34         "sigs.k8s.io/controller-runtime/pkg/source"
35 )
36
// VirtletVM pairs the IP address and MAC address reported for a single
// Virtlet virtual machine (as collected by listVirtletVMs and matched
// against CR-supplied MACs in getVMIPaddress).
type VirtletVM struct {
	IPaddress  string
	MACaddress string
}
41
// NetworksStatus mirrors one entry of a pod's network-status JSON annotation
// (Multus-style); all fields are optional in the serialized form.
type NetworksStatus struct {
	Name      string      `json:"name,omitempty"`
	Interface string      `json:"interface,omitempty"`
	Ips       []string    `json:"ips,omitempty"`
	Mac       string      `json:"mac,omitempty"`
	Default   bool        `json:"default,omitempty"`
	Dns       interface{} `json:"dns,omitempty"`
}
50
// log is the package-level structured logger for the provisioning controller.
var log = logf.Log.WithName("controller_provisioning")
52
/*
 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
 * business logic. Delete these comments after modifying this file.
 */
57
// Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
// It returns any error raised while registering the controller or its watches.
func Add(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
63
// newReconciler returns a new reconcile.Reconciler wired with the three
// clients the controller uses: the manager's split client/scheme, a typed
// clientset, and a dynamic client for reading BareMetalHost CRs.
//
// NOTE(review): each error below is only printed and construction continues,
// so on failure the returned reconciler can hold nil clients that will fail
// later, at Reconcile time — confirm whether failing fast is preferable.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {

	config, err := config.GetConfig()
	if err != nil {
		fmt.Printf("Could not get kube config, Error: %v\n", err)
	}

	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		fmt.Printf("Could not create clientset, Error: %v\n", err)
	}
	bmhDynamicClient, err := dynamic.NewForConfig(config)

	if err != nil {
		fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
	}

	return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
}
84
85 // add adds a new Controller to mgr with r as the reconcile.Reconciler
86 func add(mgr manager.Manager, r reconcile.Reconciler) error {
87         // Create a new controller
88         c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
89         if err != nil {
90                 return err
91         }
92
93         // Watch for changes to primary resource Provisioning
94         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
95         if err != nil {
96                 return err
97         }
98
99         // Watch for changes to resource configmap created as a consequence of the provisioning CR
100         err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
101                 IsController: true,
102                 OwnerType:    &bpav1alpha1.Provisioning{},
103         })
104
105         if err != nil {
106                 return err
107         }
108
109         //Watch for changes to job resource also created as a consequence of the provisioning CR
110         err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
111                 IsController: true,
112                 OwnerType:    &bpav1alpha1.Provisioning{},
113         })
114
115         if err != nil {
116                 return err
117         }
118
119         // Watch for changes to resource software CR
120         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
121         if err != nil {
122                 return err
123         }
124
125         return nil
126 }
127
// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileProvisioning{}

// ReconcileProvisioning reconciles a Provisioning object
type ReconcileProvisioning struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// scheme is the manager's runtime scheme.
	scheme *runtime.Scheme
	// clientset is a typed Kubernetes client, passed to the helpers that
	// manage ConfigMaps, Jobs and pod listings.
	clientset kubernetes.Interface
	// bmhClient is a dynamic client used to list BareMetalHost CRs.
	bmhClient dynamic.Interface
}
140
141 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
142 // and what is in the Provisioning.Spec
143 // TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
144 // a Pod as an example
145 // Note:
146 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
147 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
148 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
149         reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
150         fmt.Printf("\n\n")
151         reqLogger.Info("Reconciling Custom Resource")
152
153         // Fetch the Provisioning instance
154         provisioningInstance := &bpav1alpha1.Provisioning{}
155         softwareInstance := &bpav1alpha1.Software{}
156         err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
157         provisioningCreated := true
158         if err != nil {
159
160                 //Check if its a Software Instance
161                 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
162                 if err != nil {
163                         if errors.IsNotFound(err) {
164                                 // Request object not found, could have been deleted after reconcile request.
165                                 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
166                                 // Return and don't requeue
167                                 return reconcile.Result{}, nil
168                         }
169
170                         // Error reading the object - requeue the request.
171                         return reconcile.Result{}, err
172                 }
173
174                 //No error occured and so a Software CR was created not a Provisoning CR
175                 provisioningCreated = false
176         }
177
178         masterTag := "MASTER_"
179         workerTag := "WORKER_"
180
181         if provisioningCreated {
182
183                 ///////////////////////////////////////////////////////////////////////////////////////////////
184                 ////////////////         Provisioning CR was created so install KUD          /////////////////
185                 //////////////////////////////////////////////////////////////////////////////////////////////
186                 clusterName := provisioningInstance.Labels["cluster"]
187                 clusterType := provisioningInstance.Labels["cluster-type"]
188                 mastersList := provisioningInstance.Spec.Masters
189                 workersList := provisioningInstance.Spec.Workers
190                 kudPlugins := provisioningInstance.Spec.KUDPlugins
191                 podSubnet := provisioningInstance.Spec.PodSubnet
192
193                 bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
194                 virtletVMList, _ := listVirtletVMs(r.clientset)
195
196                 var allString string
197                 var masterString string
198                 var workerString string
199
200                 dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
201                 multiClusterDir := "/multi-cluster"
202
203                 //Create Directory for the specific cluster
204                 clusterDir := multiClusterDir + "/" + clusterName
205                 os.MkdirAll(clusterDir, os.ModePerm)
206
207                 //Create Maps to be used for cluster ip address to label configmap
208                 clusterLabel := make(map[string]string)
209                 clusterLabel["cluster"] = clusterName
210                 clusterData := make(map[string]string)
211
212                 //Iterate through mastersList and get all the mac addresses and IP addresses
213                 for _, masterMap := range mastersList {
214
215                         for masterLabel, master := range masterMap {
216                                 masterMAC := master.MACaddress
217                                 hostIPaddress := ""
218
219                                 if masterMAC == "" {
220                                         err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
221                                         return reconcile.Result{}, err
222                                 }
223
224                                 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
225
226                                 //Check 'cluster-type' label for Virtlet VMs
227                                 if clusterType == "virtlet-vm" {
228                                         //Get VM IP address of master
229                                         hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
230                                         if err != nil || hostIPaddress == "" {
231                                                 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
232                                                 return reconcile.Result{}, err
233                                         }
234                                         containsMac = true
235                                 }
236
237                                 if containsMac {
238
239                                         if clusterType != "virtlet-vm" {
240                                                 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
241
242                                                 //Get IP address of master
243                                                 hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile)
244                                                 if err != nil || hostIPaddress == "" {
245                                                         err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
246                                                         return reconcile.Result{}, err
247                                                 }
248                                                 allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
249                                         }
250
251                                         if clusterType == "virtlet-vm" {
252                                                 allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
253                                         }
254                                         masterString += masterLabel + "\n"
255                                         clusterData[masterTag+masterLabel] = hostIPaddress
256
257                                         fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
258
259                                         if len(workersList) != 0 {
260
261                                                 //Iterate through workersList and get all the mac addresses
262                                                 for _, workerMap := range workersList {
263
264                                                         //Get worker labels from the workermap
265                                                         for workerLabel, worker := range workerMap {
266
267                                                                 //Check if workerString already contains worker label
268                                                                 containsWorkerLabel := strings.Contains(workerString, workerLabel)
269                                                                 workerMAC := worker.MACaddress
270                                                                 hostIPaddress = ""
271
272                                                                 //Error occurs if the same label is given to different hosts (assumption,
273                                                                 //each MAC address represents a unique host
274                                                                 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
275                                                                         if containsWorkerLabel {
276                                                                                 strings.ReplaceAll(workerString, workerLabel, "")
277                                                                         }
278                                                                         err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
279                                       different label to node with MACAddress %s`, workerLabel, workerMAC)
280                                                                         return reconcile.Result{}, err
281
282                                                                         //same node performs worker and master roles
283                                                                 } else if workerLabel == masterLabel && !containsWorkerLabel {
284                                                                         workerString += workerLabel + "\n"
285
286                                                                         //Add host to ip address config map with worker tag
287                                                                         hostIPaddress = clusterData[masterTag+masterLabel]
288                                                                         clusterData[workerTag+masterLabel] = hostIPaddress
289
290                                                                         //Error occurs if the same node is given different labels
291                                                                 } else if workerLabel != masterLabel && workerMAC == masterMAC {
292                                                                         if containsWorkerLabel {
293                                                                                 strings.ReplaceAll(workerString, workerLabel, "")
294                                                                         }
295                                                                         err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
296                                                         different label to node with MACAddress %s`, workerLabel, workerMAC)
297                                                                         return reconcile.Result{}, err
298
299                                                                         //worker node is different from any master node and it has not been added to the worker list
300                                                                 } else if workerLabel != masterLabel && !containsWorkerLabel {
301
302                                                                         // Error occurs if MAC address not provided for worker node not matching master
303                                                                         if workerMAC == "" {
304                                                                                 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
305                                                                                 return reconcile.Result{}, err
306                                                                         }
307
308                                                                         containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
309
310                                                                         if clusterType == "virtlet-vm" {
311                                                                                 //Get VM IP address of master
312                                                                                 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
313                                                                                 if err != nil || hostIPaddress == "" {
314                                                                                         err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
315                                                                                         return reconcile.Result{}, err
316                                                                                 }
317                                                                                 containsMac = true
318                                                                         }
319
320                                                                         if containsMac {
321
322                                                                                 if clusterType != "virtlet-vm" {
323                                                                                         fmt.Printf("Host %s matches that macAddress\n", bmhCR)
324
325                                                                                         //Get IP address of worker
326                                                                                         hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile)
327                                                                                         if err != nil {
328                                                                                                 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
329                                                                                                 return reconcile.Result{}, err
330                                                                                         }
331                                                                                         allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
332                                                                                 }
333                                                                                 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
334
335                                                                                 if clusterType == "virtlet-vm" {
336                                                                                         allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
337                                                                                 }
338                                                                                 workerString += workerLabel + "\n"
339                                                                                 clusterData[workerTag+workerLabel] = hostIPaddress
340
341                                                                                 //No host found that matches the worker MAC
342                                                                         } else {
343
344                                                                                 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
345                                                                                 return reconcile.Result{}, err
346                                                                         }
347                                                                 }
348                                                         }
349                                                 }
350                                                 //No worker node specified, add master as worker node
351                                         } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
352                                                 workerString += masterLabel + "\n"
353
354                                                 //Add host to ip address config map with worker tag
355                                                 hostIPaddress = clusterData[masterTag+masterLabel]
356                                                 clusterData[workerTag+masterLabel] = hostIPaddress
357                                         }
358
359                                         //No host matching master MAC found
360                                 } else {
361                                         err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
362                                         return reconcile.Result{}, err
363                                 }
364                         }
365                 }
366
367                 //Create host.ini file
368                 //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
369                 iniHostFilePath := clusterDir + "/hosts.ini"
370                 newFile, err := os.Create(iniHostFilePath)
371                 defer newFile.Close()
372
373                 if err != nil {
374                         fmt.Printf("Error occured while creating file \n %v", err)
375                         return reconcile.Result{}, err
376                 }
377
378                 hostFile, err := ini.Load(iniHostFilePath)
379                 if err != nil {
380                         fmt.Printf("Error occured while Loading file \n %v", err)
381                         return reconcile.Result{}, err
382                 }
383
384                 _, err = hostFile.NewRawSection("all", allString)
385                 if err != nil {
386                         fmt.Printf("Error occured while creating section \n %v", err)
387                         return reconcile.Result{}, err
388                 }
389                 _, err = hostFile.NewRawSection("kube-master", masterString)
390                 if err != nil {
391                         fmt.Printf("Error occured while creating section \n %v", err)
392                         return reconcile.Result{}, err
393                 }
394
395                 _, err = hostFile.NewRawSection("kube-node", workerString)
396                 if err != nil {
397                         fmt.Printf("Error occured while creating section \n %v", err)
398                         return reconcile.Result{}, err
399                 }
400
401                 _, err = hostFile.NewRawSection("etcd", masterString)
402                 if err != nil {
403                         fmt.Printf("Error occured while creating section \n %v", err)
404                         return reconcile.Result{}, err
405                 }
406
407                 if clusterType != "virtlet-vm" {
408                         _, err = hostFile.NewRawSection("ovn-central", masterString)
409                         if err != nil {
410                                 fmt.Printf("Error occured while creating section \n %v", err)
411                                 return reconcile.Result{}, err
412                         }
413
414                         _, err = hostFile.NewRawSection("ovn-controller", workerString)
415                         if err != nil {
416                                 fmt.Printf("Error occured while creating section \n %v", err)
417                                 return reconcile.Result{}, err
418                         }
419
420                         _, err = hostFile.NewRawSection("virtlet", workerString)
421                         if err != nil {
422                                 fmt.Printf("Error occured while creating section \n %v", err)
423                                 return reconcile.Result{}, err
424                         }
425                 }
426                 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
427                 if err != nil {
428                         fmt.Printf("Error occured while creating section \n %v", err)
429                         return reconcile.Result{}, err
430                 }
431
432                 //Create host.ini file for KUD
433                 hostFile.SaveTo(iniHostFilePath)
434
435                 //Install KUD
436                 err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
437                 if err != nil {
438                         fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
439                         return reconcile.Result{}, err
440                 }
441
442                 //Start separate thread to keep checking job status, Create an IP address configmap
443                 //for cluster if KUD is successfully installed
444                 go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
445
446                 return reconcile.Result{}, nil
447
448         }
449
450         ///////////////////////////////////////////////////////////////////////////////////////////////
451         ////////////////         Software CR was created so install software         /////////////////
452         //////////////////////////////////////////////////////////////////////////////////////////////
453         softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
454         defaultSSHPrivateKey := "/root/.ssh/id_rsa"
455
456         //Get IP address configmap for the cluster
457         clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
458         if err != nil {
459                 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
460                 return reconcile.Result{}, err
461         }
462
463         for hostLabel, ipAddress := range clusterConfigMapData {
464
465                 if strings.Contains(hostLabel, masterTag) {
466                         // Its a master node, install master software
467                         err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
468                         if err != nil {
469                                 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
470                         }
471                 } else if strings.Contains(hostLabel, workerTag) {
472                         // Its a worker node, install worker software
473                         err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
474                         if err != nil {
475                                 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
476                         }
477
478                 }
479
480         }
481
482         return reconcile.Result{}, nil
483 }
484
485 //Function to Get List containing baremetal hosts
486 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
487
488         //Create GVR representing a BareMetalHost CR
489         bmhGVR := schema.GroupVersionResource{
490                 Group:    "metal3.io",
491                 Version:  "v1alpha1",
492                 Resource: "baremetalhosts",
493         }
494
495         //Get List containing all BareMetalHosts CRs
496         bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
497         if err != nil {
498                 fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
499                 return &unstructured.UnstructuredList{}, err
500         }
501
502         return bareMetalHosts, nil
503 }
504
505 //Function to check if BareMetalHost containing MAC address exist
506 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
507
508         //Convert macAddress to byte array for comparison
509         macAddressByte := []byte(macAddress)
510         macBool := false
511
512         for _, bareMetalHost := range bareMetalHostList.Items {
513                 bmhJson, _ := bareMetalHost.MarshalJSON()
514
515                 macBool = bytes.Contains(bmhJson, macAddressByte)
516                 if macBool {
517                         return macBool, bareMetalHost.GetName()
518                 }
519
520         }
521
522         return macBool, ""
523
524 }
525
526 //Function to get the IP address of a host from the DHCP file
527 func getHostIPaddress(macAddress string, dhcpLeaseFilePath string) (string, error) {
528
529         //Read the dhcp lease file
530         dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
531         if err != nil {
532                 fmt.Printf("Failed to read lease file\n")
533                 return "", err
534         }
535
536         dhcpLeases := string(dhcpFile)
537
538         //Regex to use to search dhcpLeases
539         reg := "lease.*{|ethernet.*|\n. binding state.*"
540         re, err := regexp.Compile(reg)
541         if err != nil {
542                 fmt.Printf("Could not create Regexp object, Error %v occured\n", err)
543                 return "", err
544         }
545
546         //Get String containing leased Ip addresses and Corressponding MAC addresses
547         out := re.FindAllString(dhcpLeases, -1)
548         outString := strings.Join(out, " ")
549         stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
550                 " binding state", "", "{", "")
551         replaced := stringReplacer.Replace(outString)
552         ipMacList := strings.Fields(replaced)
553
554         //Get IP addresses corresponding to Input MAC Address
555         for idx := len(ipMacList) - 1; idx >= 0; idx-- {
556                 item := ipMacList[idx]
557                 if item == macAddress {
558
559                         leaseState := ipMacList[idx-1]
560                         if leaseState != "active" {
561                                 err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
562                                 fmt.Printf("%v\n", err)
563                                 return "", err
564                         }
565                         ipAdd := ipMacList[idx-2]
566                         return ipAdd, nil
567                 }
568
569         }
570         return "", nil
571 }
572
573 //Function to create configmap
574 func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {
575
576         configmapClient := clientset.CoreV1().ConfigMaps(namespace)
577
578         configmap := &corev1.ConfigMap{
579
580                 ObjectMeta: metav1.ObjectMeta{
581                         Name:   labels["cluster"] + "-configmap",
582                         Labels: labels,
583                 },
584                 Data: data,
585         }
586
587         _, err := configmapClient.Create(configmap)
588         if err != nil {
589                 return err
590
591         }
592         return nil
593
594 }
595
596 //Function to get configmap Data
597 func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
598
599         configmapClient := clientset.CoreV1().ConfigMaps(namespace)
600         configmapName := clusterName + "-configmap"
601         clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
602         if err != nil {
603                 return nil, err
604         }
605
606         configmapData := clusterConfigmap.Data
607         return configmapData, nil
608 }
609
//Function to create job for KUD installation
//createKUDinstallerJob launches a Kubernetes Job named "kud-<clusterName>"
//that runs the KUD installer container against the named cluster.  The
//installer command line is extended with "--network <podSubnet>" when a
//pod subnet is given and "--plugins <p1> <p2> ..." when plugins are given.
//NOTE(review): the namespace parameter is not used — the job is always
//created in the "default" namespace; confirm whether that is intentional.
func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error {

	// No retries: the job either succeeds on the first attempt or is
	// reported failed (checkJob watches Succeeded/Failed counts).
	var backOffLimit int32 = 0
	var privi bool = true

	installerString := " ./installer --cluster " + clusterName
	if len(podSubnet) > 0 {
		installerString += " --network " + podSubnet
	}

	// Check if any plugin was specified
	if len(kudPlugins) > 0 {
		plugins := " --plugins"

		for _, plug := range kudPlugins {
			plugins += " " + plug
		}

		installerString += plugins
	}

	jobClient := clientset.BatchV1().Jobs("default")

	job := &batchv1.Job{

		ObjectMeta: metav1.ObjectMeta{
			Name:   "kud-" + clusterName,
			Labels: labels,
		},
		Spec: batchv1.JobSpec{
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labels,
				},

				Spec: corev1.PodSpec{
					// Pod runs on the host network and privileged —
					// presumably required so the installer can reach and
					// configure the cluster hosts directly; confirm.
					HostNetwork: true,
					Containers: []corev1.Container{{
						Name:            "kud",
						Image:           "github.com/onap/multicloud-k8s:latest",
						ImagePullPolicy: "IfNotPresent",
						VolumeMounts: []corev1.VolumeMount{{
							Name:      "multi-cluster",
							MountPath: "/opt/kud/multi-cluster",
						},
							{
								Name:      "secret-volume",
								MountPath: "/.ssh",
							},
						},
						// Environment comes from the "kud-installer" ConfigMap.
						EnvFrom: []corev1.EnvFromSource{
							{
								ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}},
							},
						},
						// Copy the mounted SSH key into /root/.ssh with sane
						// permissions, then run the installer command built above.
						Command: []string{"/bin/sh", "-c"},
						Args:    []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
						SecurityContext: &corev1.SecurityContext{
							Privileged: &privi,
						},
					},
					},
					Volumes: []corev1.Volume{{
						Name: "multi-cluster",
						VolumeSource: corev1.VolumeSource{
							HostPath: &corev1.HostPathVolumeSource{
								Path: "/opt/kud/multi-cluster",
							}}},
						{
							Name: "secret-volume",
							VolumeSource: corev1.VolumeSource{
								Secret: &corev1.SecretVolumeSource{
									SecretName: "ssh-key-secret",
								},
							}}},
					RestartPolicy: "Never",
				},
			},
			BackoffLimit: &backOffLimit,
		},
	}
	_, err := jobClient.Create(job)
	if err != nil {
		fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
		return err
	}
	return nil

}
700
701 //Function to Check if job succeeded
702 func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
703
704         fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
705         jobName := "kud-" + clusterName
706         jobClient := clientset.BatchV1().Jobs(namespace)
707
708         for {
709                 time.Sleep(2 * time.Second)
710
711                 job, err := jobClient.Get(jobName, metav1.GetOptions{})
712                 if err != nil {
713                         fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
714                         return
715                 }
716                 jobSucceeded := job.Status.Succeeded
717                 jobFailed := job.Status.Failed
718
719                 if jobSucceeded == 1 {
720                         fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
721
722                         //KUD was installed successfully create configmap to store IP address info for the cluster
723                         err = createConfigMap(data, labels, namespace, clientset)
724                         if err != nil {
725                                 fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
726                                 return
727                         }
728                         return
729                 }
730
731                 if jobFailed == 1 {
732                         fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
733                         return
734                 }
735
736         }
737         return
738
739 }
740
741 //Function to get software list from software CR
742 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
743
744         CRclusterName := softwareCR.GetLabels()["cluster"]
745
746         masterSofwareList := softwareCR.Spec.MasterSoftware
747         workerSoftwareList := softwareCR.Spec.WorkerSoftware
748
749         return CRclusterName, masterSofwareList, workerSoftwareList
750 }
751
752 //Function to install software in clusterHosts
753 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
754
755         var installString string
756         for _, software := range softwareList {
757
758                 switch t := software.(type) {
759                 case string:
760                         installString += software.(string) + " "
761                 case interface{}:
762                         softwareMap, errBool := software.(map[string]interface{})
763                         if !errBool {
764                                 fmt.Printf("Error occured, cannot install software %v\n", software)
765                         }
766                         for softwareName, versionMap := range softwareMap {
767
768                                 versionMAP, _ := versionMap.(map[string]interface{})
769                                 version := versionMAP["version"].(string)
770                                 installString += softwareName + "=" + version + " "
771                         }
772                 default:
773                         fmt.Printf("invalid format %v\n", t)
774                 }
775
776         }
777
778         err := sshInstaller(installString, sshPrivateKey, ipAddress)
779         if err != nil {
780                 return err
781         }
782         return nil
783
784 }
785
786 //Function to Run Installation commands via ssh
787 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
788
789         buffer, err := ioutil.ReadFile(sshPrivateKey)
790         if err != nil {
791                 return err
792         }
793
794         key, err := ssh.ParsePrivateKey(buffer)
795         if err != nil {
796                 return err
797         }
798
799         sshConfig := &ssh.ClientConfig{
800                 User: "root",
801                 Auth: []ssh.AuthMethod{
802                         ssh.PublicKeys(key),
803                 },
804
805                 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
806         }
807
808         client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
809         if err != nil {
810                 return err
811         }
812
813         session, err := client.NewSession()
814         if err != nil {
815                 return err
816         }
817
818         defer session.Close()
819         defer client.Close()
820
821         cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
822         err = session.Start(cmd)
823
824         if err != nil {
825                 return err
826         }
827
828         return nil
829
830 }
831
832 func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
833
834         var vmPodList []VirtletVM
835
836         pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
837         if err != nil {
838                 fmt.Printf("Could not get pod info, Error: %v\n", err)
839                 return []VirtletVM{}, err
840         }
841
842         for _, pod := range pods.Items {
843                 var podAnnotation map[string]interface{}
844                 var podStatus corev1.PodStatus
845                 var podDefaultNetStatus []NetworksStatus
846
847                 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
848                 if err != nil {
849                         fmt.Printf("Could not get pod annotations, Error: %v\n", err)
850                         return []VirtletVM{}, err
851                 }
852
853                 json.Unmarshal([]byte(annotation), &podAnnotation)
854                 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
855                         runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
856
857                         podStatusJson, _ := json.Marshal(pod.Status)
858                         json.Unmarshal([]byte(podStatusJson), &podStatus)
859
860                         if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
861                                 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
862                                 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
863
864                                 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
865                         }
866                 }
867         }
868
869         return vmPodList, nil
870 }
871
872 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
873
874         for i := 0; i < len(vmList); i++ {
875                 if vmList[i].MACaddress == macAddress {
876                         return vmList[i].IPaddress, nil
877                 }
878         }
879         return "", nil
880 }