Remove bootstrap network
[icn.git] / cmd / bpa-operator / pkg / controller / provisioning / provisioning_controller.go
1 package provisioning
2
3 import (
4         "bytes"
5         "context"
6         "encoding/json"
7         "fmt"
8         "io/ioutil"
9         "os"
10         "strings"
11         "time"
12
13         bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
14         batchv1 "k8s.io/api/batch/v1"
15         corev1 "k8s.io/api/core/v1"
16         "k8s.io/apimachinery/pkg/api/errors"
17         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
18         "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19         "k8s.io/apimachinery/pkg/runtime"
20         "k8s.io/apimachinery/pkg/runtime/schema"
21         "k8s.io/client-go/dynamic"
22
23         "golang.org/x/crypto/ssh"
24         "gopkg.in/ini.v1"
25         "k8s.io/client-go/kubernetes"
26         "sigs.k8s.io/controller-runtime/pkg/client"
27         "sigs.k8s.io/controller-runtime/pkg/client/config"
28         "sigs.k8s.io/controller-runtime/pkg/controller"
29         "sigs.k8s.io/controller-runtime/pkg/handler"
30         "sigs.k8s.io/controller-runtime/pkg/manager"
31         "sigs.k8s.io/controller-runtime/pkg/reconcile"
32         logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
33         "sigs.k8s.io/controller-runtime/pkg/source"
34 )
35
// VirtletVM pairs the IP address of a Virtlet VM with the MAC address of its
// network interface; used to resolve a VM's IP from the MAC given in a
// Provisioning CR (see the virtlet-vm paths in Reconcile).
type VirtletVM struct {
	IPaddress  string // IP address assigned to the VM
	MACaddress string // MAC address of the VM's NIC
}
40
// NetworksStatus models one entry of a pod's JSON network-status data
// (presumably the CNI network-status annotation populated by the network
// plugin — verify against listVirtletVMs, which is defined elsewhere in
// this file).
type NetworksStatus struct {
	Name      string      `json:"name,omitempty"`      // network attachment name
	Interface string      `json:"interface,omitempty"` // interface name inside the pod
	Ips       []string    `json:"ips,omitempty"`       // IP addresses on the interface
	Mac       string      `json:"mac,omitempty"`       // MAC address of the interface
	Default   bool        `json:"default,omitempty"`   // whether this is the pod's default network
	Dns       interface{} `json:"dns,omitempty"`       // DNS config; shape not fixed by this file
}
49
// log is the package-level structured logger for this controller.
var log = logf.Log.WithName("controller_provisioning")
51
52 /**
53 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
54 * business logic.  Delete these comments after modifying this file.*
55  */
56
57 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
58 // and Start it when the Manager is Started.
59 func Add(mgr manager.Manager) error {
60         return add(mgr, newReconciler(mgr))
61 }
62
63 // newReconciler returns a new reconcile.Reconciler
64 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
65
66         config, err := config.GetConfig()
67         if err != nil {
68                 fmt.Printf("Could not get kube config, Error: %v\n", err)
69         }
70
71         clientSet, err := kubernetes.NewForConfig(config)
72         if err != nil {
73                 fmt.Printf("Could not create clientset, Error: %v\n", err)
74         }
75         bmhDynamicClient, err := dynamic.NewForConfig(config)
76
77         if err != nil {
78                 fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
79         }
80
81         return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
82 }
83
84 // add adds a new Controller to mgr with r as the reconcile.Reconciler
85 func add(mgr manager.Manager, r reconcile.Reconciler) error {
86         // Create a new controller
87         c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
88         if err != nil {
89                 return err
90         }
91
92         // Watch for changes to primary resource Provisioning
93         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
94         if err != nil {
95                 return err
96         }
97
98         // Watch for changes to resource configmap created as a consequence of the provisioning CR
99         err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
100                 IsController: true,
101                 OwnerType:    &bpav1alpha1.Provisioning{},
102         })
103
104         if err != nil {
105                 return err
106         }
107
108         //Watch for changes to job resource also created as a consequence of the provisioning CR
109         err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
110                 IsController: true,
111                 OwnerType:    &bpav1alpha1.Provisioning{},
112         })
113
114         if err != nil {
115                 return err
116         }
117
118         // Watch for changes to resource software CR
119         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
120         if err != nil {
121                 return err
122         }
123
124         return nil
125 }
126
// blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
var _ reconcile.Reconciler = &ReconcileProvisioning{}

// ReconcileProvisioning reconciles a Provisioning object
type ReconcileProvisioning struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// scheme is the manager's runtime scheme.
	scheme *runtime.Scheme
	// clientset is a typed Kubernetes client, used here for configmaps,
	// pods, and jobs.
	clientset kubernetes.Interface
	// bmhClient is a dynamic client used to list metal3 BareMetalHost CRs.
	bmhClient dynamic.Interface
}
139
140 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
141 // and what is in the Provisioning.Spec
142 // TODO(user): Modify this Reconcile function to implement your Controller logic.  This example creates
143 // a Pod as an example
144 // Note:
145 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
146 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
147 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
148         reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
149         fmt.Printf("\n\n")
150         reqLogger.Info("Reconciling Custom Resource")
151
152         // Fetch the Provisioning instance
153         provisioningInstance := &bpav1alpha1.Provisioning{}
154         softwareInstance := &bpav1alpha1.Software{}
155         err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
156         provisioningCreated := true
157         if err != nil {
158
159                 //Check if its a Software Instance
160                 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
161                 if err != nil {
162                         if errors.IsNotFound(err) {
163                                 // Request object not found, could have been deleted after reconcile request.
164                                 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
165                                 // Return and don't requeue
166                                 return reconcile.Result{}, nil
167                         }
168
169                         // Error reading the object - requeue the request.
170                         return reconcile.Result{}, err
171                 }
172
173                 //No error occured and so a Software CR was created not a Provisoning CR
174                 provisioningCreated = false
175         }
176
177         masterTag := "MASTER_"
178         workerTag := "WORKER_"
179
180         if provisioningCreated {
181
182                 ///////////////////////////////////////////////////////////////////////////////////////////////
183                 ////////////////         Provisioning CR was created so install KUD          /////////////////
184                 //////////////////////////////////////////////////////////////////////////////////////////////
185                 clusterName := provisioningInstance.Labels["cluster"]
186                 clusterType := provisioningInstance.Labels["cluster-type"]
187                 mastersList := provisioningInstance.Spec.Masters
188                 workersList := provisioningInstance.Spec.Workers
189                 kudPlugins := provisioningInstance.Spec.KUDPlugins
190                 podSubnet := provisioningInstance.Spec.PodSubnet
191
192                 bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
193                 virtletVMList, _ := listVirtletVMs(r.clientset)
194
195                 var allString string
196                 var masterString string
197                 var workerString string
198
199                 multiClusterDir := "/multi-cluster"
200
201                 //Create Directory for the specific cluster
202                 clusterDir := multiClusterDir + "/" + clusterName
203                 os.MkdirAll(clusterDir, os.ModePerm)
204
205                 //Create Maps to be used for cluster ip address to label configmap
206                 clusterLabel := make(map[string]string)
207                 clusterLabel["cluster"] = clusterName
208                 clusterData := make(map[string]string)
209
210                 //Iterate through mastersList and get all the mac addresses and IP addresses
211                 for _, masterMap := range mastersList {
212
213                         for masterLabel, master := range masterMap {
214                                 masterMAC := master.MACaddress
215                                 hostIPaddress := ""
216
217                                 if masterMAC == "" {
218                                         err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
219                                         return reconcile.Result{}, err
220                                 }
221
222                                 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
223
224                                 //Check 'cluster-type' label for Virtlet VMs
225                                 if clusterType == "virtlet-vm" {
226                                         //Get VM IP address of master
227                                         hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
228                                         if err != nil || hostIPaddress == "" {
229                                                 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
230                                                 return reconcile.Result{}, err
231                                         }
232                                         containsMac = true
233                                 }
234
235                                 if containsMac {
236
237                                         if clusterType != "virtlet-vm" {
238                                                 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
239
240                                                 //Get IP address of master
241                                                 hostIPaddress, err = getHostIPaddress(bareMetalHostList, masterMAC)
242                                                 if err != nil || hostIPaddress == "" {
243                                                         err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
244                                                         return reconcile.Result{}, err
245                                                 }
246                                                 allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
247                                         }
248
249                                         if clusterType == "virtlet-vm" {
250                                                 allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
251                                         }
252                                         masterString += masterLabel + "\n"
253                                         clusterData[masterTag+masterLabel] = hostIPaddress
254
255                                         fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
256
257                                         if len(workersList) != 0 {
258
259                                                 //Iterate through workersList and get all the mac addresses
260                                                 for _, workerMap := range workersList {
261
262                                                         //Get worker labels from the workermap
263                                                         for workerLabel, worker := range workerMap {
264
265                                                                 //Check if workerString already contains worker label
266                                                                 containsWorkerLabel := strings.Contains(workerString, workerLabel)
267                                                                 workerMAC := worker.MACaddress
268                                                                 hostIPaddress = ""
269
270                                                                 //Error occurs if the same label is given to different hosts (assumption,
271                                                                 //each MAC address represents a unique host
272                                                                 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
273                                                                         if containsWorkerLabel {
274                                                                                 strings.ReplaceAll(workerString, workerLabel, "")
275                                                                         }
276                                                                         err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
277                                       different label to node with MACAddress %s`, workerLabel, workerMAC)
278                                                                         return reconcile.Result{}, err
279
280                                                                         //same node performs worker and master roles
281                                                                 } else if workerLabel == masterLabel && !containsWorkerLabel {
282                                                                         workerString += workerLabel + "\n"
283
284                                                                         //Add host to ip address config map with worker tag
285                                                                         hostIPaddress = clusterData[masterTag+masterLabel]
286                                                                         clusterData[workerTag+masterLabel] = hostIPaddress
287
288                                                                         //Error occurs if the same node is given different labels
289                                                                 } else if workerLabel != masterLabel && workerMAC == masterMAC {
290                                                                         if containsWorkerLabel {
291                                                                                 strings.ReplaceAll(workerString, workerLabel, "")
292                                                                         }
293                                                                         err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
294                                                         different label to node with MACAddress %s`, workerLabel, workerMAC)
295                                                                         return reconcile.Result{}, err
296
297                                                                         //worker node is different from any master node and it has not been added to the worker list
298                                                                 } else if workerLabel != masterLabel && !containsWorkerLabel {
299
300                                                                         // Error occurs if MAC address not provided for worker node not matching master
301                                                                         if workerMAC == "" {
302                                                                                 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
303                                                                                 return reconcile.Result{}, err
304                                                                         }
305
306                                                                         containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
307
308                                                                         if clusterType == "virtlet-vm" {
309                                                                                 //Get VM IP address of master
310                                                                                 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
311                                                                                 if err != nil || hostIPaddress == "" {
312                                                                                         err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
313                                                                                         return reconcile.Result{}, err
314                                                                                 }
315                                                                                 containsMac = true
316                                                                         }
317
318                                                                         if containsMac {
319
320                                                                                 if clusterType != "virtlet-vm" {
321                                                                                         fmt.Printf("Host %s matches that macAddress\n", bmhCR)
322
323                                                                                         //Get IP address of worker
324                                                                                         hostIPaddress, err = getHostIPaddress(bareMetalHostList, workerMAC)
325                                                                                         if err != nil {
326                                                                                                 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
327                                                                                                 return reconcile.Result{}, err
328                                                                                         }
329                                                                                         allString += workerLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
330                                                                                 }
331                                                                                 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
332
333                                                                                 if clusterType == "virtlet-vm" {
334                                                                                         allString += masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
335                                                                                 }
336                                                                                 workerString += workerLabel + "\n"
337                                                                                 clusterData[workerTag+workerLabel] = hostIPaddress
338
339                                                                                 //No host found that matches the worker MAC
340                                                                         } else {
341
342                                                                                 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
343                                                                                 return reconcile.Result{}, err
344                                                                         }
345                                                                 }
346                                                         }
347                                                 }
348                                                 //No worker node specified, add master as worker node
349                                         } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
350                                                 workerString += masterLabel + "\n"
351
352                                                 //Add host to ip address config map with worker tag
353                                                 hostIPaddress = clusterData[masterTag+masterLabel]
354                                                 clusterData[workerTag+masterLabel] = hostIPaddress
355                                         }
356
357                                         //No host matching master MAC found
358                                 } else {
359                                         err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
360                                         return reconcile.Result{}, err
361                                 }
362                         }
363                 }
364
365                 //Create host.ini file
366                 //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
367                 iniHostFilePath := clusterDir + "/hosts.ini"
368                 newFile, err := os.Create(iniHostFilePath)
369                 defer newFile.Close()
370
371                 if err != nil {
372                         fmt.Printf("Error occured while creating file \n %v", err)
373                         return reconcile.Result{}, err
374                 }
375
376                 hostFile, err := ini.Load(iniHostFilePath)
377                 if err != nil {
378                         fmt.Printf("Error occured while Loading file \n %v", err)
379                         return reconcile.Result{}, err
380                 }
381
382                 _, err = hostFile.NewRawSection("all", allString)
383                 if err != nil {
384                         fmt.Printf("Error occured while creating section \n %v", err)
385                         return reconcile.Result{}, err
386                 }
387                 _, err = hostFile.NewRawSection("kube-master", masterString)
388                 if err != nil {
389                         fmt.Printf("Error occured while creating section \n %v", err)
390                         return reconcile.Result{}, err
391                 }
392
393                 _, err = hostFile.NewRawSection("kube-node", workerString)
394                 if err != nil {
395                         fmt.Printf("Error occured while creating section \n %v", err)
396                         return reconcile.Result{}, err
397                 }
398
399                 _, err = hostFile.NewRawSection("etcd", masterString)
400                 if err != nil {
401                         fmt.Printf("Error occured while creating section \n %v", err)
402                         return reconcile.Result{}, err
403                 }
404
405                 if clusterType != "virtlet-vm" {
406                         _, err = hostFile.NewRawSection("ovn-central", masterString)
407                         if err != nil {
408                                 fmt.Printf("Error occured while creating section \n %v", err)
409                                 return reconcile.Result{}, err
410                         }
411
412                         _, err = hostFile.NewRawSection("ovn-controller", workerString)
413                         if err != nil {
414                                 fmt.Printf("Error occured while creating section \n %v", err)
415                                 return reconcile.Result{}, err
416                         }
417
418                         _, err = hostFile.NewRawSection("virtlet", workerString)
419                         if err != nil {
420                                 fmt.Printf("Error occured while creating section \n %v", err)
421                                 return reconcile.Result{}, err
422                         }
423                 }
424                 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
425                 if err != nil {
426                         fmt.Printf("Error occured while creating section \n %v", err)
427                         return reconcile.Result{}, err
428                 }
429
430                 //Create host.ini file for KUD
431                 hostFile.SaveTo(iniHostFilePath)
432
433                 //Install KUD
434                 err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
435                 if err != nil {
436                         fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
437                         return reconcile.Result{}, err
438                 }
439
440                 //Start separate thread to keep checking job status, Create an IP address configmap
441                 //for cluster if KUD is successfully installed
442                 go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
443
444                 return reconcile.Result{}, nil
445
446         }
447
448         ///////////////////////////////////////////////////////////////////////////////////////////////
449         ////////////////         Software CR was created so install software         /////////////////
450         //////////////////////////////////////////////////////////////////////////////////////////////
451         softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
452         defaultSSHPrivateKey := "/root/.ssh/id_rsa"
453
454         //Get IP address configmap for the cluster
455         clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
456         if err != nil {
457                 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
458                 return reconcile.Result{}, err
459         }
460
461         for hostLabel, ipAddress := range clusterConfigMapData {
462
463                 if strings.Contains(hostLabel, masterTag) {
464                         // Its a master node, install master software
465                         err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
466                         if err != nil {
467                                 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
468                         }
469                 } else if strings.Contains(hostLabel, workerTag) {
470                         // Its a worker node, install worker software
471                         err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
472                         if err != nil {
473                                 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
474                         }
475
476                 }
477
478         }
479
480         return reconcile.Result{}, nil
481 }
482
483 //Function to Get List containing baremetal hosts
484 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
485
486         //Create GVR representing a BareMetalHost CR
487         bmhGVR := schema.GroupVersionResource{
488                 Group:    "metal3.io",
489                 Version:  "v1alpha1",
490                 Resource: "baremetalhosts",
491         }
492
493         //Get List containing all BareMetalHosts CRs
494         bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
495         if err != nil {
496                 fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
497                 return &unstructured.UnstructuredList{}, err
498         }
499
500         return bareMetalHosts, nil
501 }
502
503 //Function to check if BareMetalHost containing MAC address exist
504 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
505
506         //Convert macAddress to byte array for comparison
507         macAddressByte := []byte(macAddress)
508         macBool := false
509
510         for _, bareMetalHost := range bareMetalHostList.Items {
511                 bmhJson, _ := bareMetalHost.MarshalJSON()
512
513                 macBool = bytes.Contains(bmhJson, macAddressByte)
514                 if macBool {
515                         return macBool, bareMetalHost.GetName()
516                 }
517
518         }
519
520         return macBool, ""
521
522 }
523
524 //Function to get the IP address of a host from the BareMetalHost resource
525 func getHostIPaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (string, error) {
526
527         for _, bareMetalHost := range bareMetalHostList.Items {
528                 status, ok := bareMetalHost.Object["status"].(map[string]interface{})
529                 if !ok {
530                         continue
531                 }
532                 hardware, ok := status["hardware"].(map[string]interface{})
533                 if !ok {
534                         continue
535                 }
536                 nics, ok := hardware["nics"].([]interface{})
537                 if !ok {
538                         continue
539                 }
540                 for _, nic := range nics {
541                         n, ok := nic.(map[string]interface{})
542                         if !ok {
543                                 continue
544                         }
545                         ip, ok := n["ip"].(string)
546                         if !ok {
547                                 continue
548                         }
549                         if macAddress == n["mac"] {
550                                 return ip, nil
551                         }
552                 }
553         }
554         return "", nil
555 }
556
557 //Function to create configmap
558 func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {
559
560         configmapClient := clientset.CoreV1().ConfigMaps(namespace)
561
562         configmap := &corev1.ConfigMap{
563
564                 ObjectMeta: metav1.ObjectMeta{
565                         Name:   labels["cluster"] + "-configmap",
566                         Labels: labels,
567                 },
568                 Data: data,
569         }
570
571         _, err := configmapClient.Create(configmap)
572         if err != nil {
573                 return err
574
575         }
576         return nil
577
578 }
579
580 //Function to get configmap Data
581 func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
582
583         configmapClient := clientset.CoreV1().ConfigMaps(namespace)
584         configmapName := clusterName + "-configmap"
585         clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
586         if err != nil {
587                 return nil, err
588         }
589
590         configmapData := clusterConfigmap.Data
591         return configmapData, nil
592 }
593
594 //Function to create job for KUD installation
595 func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error {
596
597         var backOffLimit int32 = 0
598         var privi bool = true
599
600         installerString := " ./installer --cluster " + clusterName
601         if len(podSubnet) > 0 {
602                 installerString += " --network " + podSubnet
603         }
604
605         // Check if any plugin was specified
606         if len(kudPlugins) > 0 {
607                 plugins := " --plugins"
608
609                 for _, plug := range kudPlugins {
610                         plugins += " " + plug
611                 }
612
613                 installerString += plugins
614         }
615
616         jobClient := clientset.BatchV1().Jobs("default")
617
618         job := &batchv1.Job{
619
620                 ObjectMeta: metav1.ObjectMeta{
621                         Name:   "kud-" + clusterName,
622                         Labels: labels,
623                 },
624                 Spec: batchv1.JobSpec{
625                         Template: corev1.PodTemplateSpec{
626                                 ObjectMeta: metav1.ObjectMeta{
627                                         Labels: labels,
628                                 },
629
630                                 Spec: corev1.PodSpec{
631                                         HostNetwork: true,
632                                         Containers: []corev1.Container{{
633                                                 Name:            "kud",
634                                                 Image:           "github.com/onap/multicloud-k8s:latest",
635                                                 ImagePullPolicy: "IfNotPresent",
636                                                 VolumeMounts: []corev1.VolumeMount{{
637                                                         Name:      "multi-cluster",
638                                                         MountPath: "/opt/kud/multi-cluster",
639                                                 },
640                                                         {
641                                                                 Name:      "secret-volume",
642                                                                 MountPath: "/.ssh",
643                                                         },
644                                                 },
645                                                 EnvFrom: []corev1.EnvFromSource{
646                                                         {
647                                                                 ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}},
648                                                         },
649                                                 },
650                                                 Command: []string{"/bin/sh", "-c"},
651                                                 Args:    []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
652                                                 SecurityContext: &corev1.SecurityContext{
653                                                         Privileged: &privi,
654                                                 },
655                                         },
656                                         },
657                                         Volumes: []corev1.Volume{{
658                                                 Name: "multi-cluster",
659                                                 VolumeSource: corev1.VolumeSource{
660                                                         HostPath: &corev1.HostPathVolumeSource{
661                                                                 Path: "/opt/kud/multi-cluster",
662                                                         }}},
663                                                 {
664                                                         Name: "secret-volume",
665                                                         VolumeSource: corev1.VolumeSource{
666                                                                 Secret: &corev1.SecretVolumeSource{
667                                                                         SecretName: "ssh-key-secret",
668                                                                 },
669                                                         }}},
670                                         RestartPolicy: "Never",
671                                 },
672                         },
673                         BackoffLimit: &backOffLimit,
674                 },
675         }
676         _, err := jobClient.Create(job)
677         if err != nil {
678                 fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
679                 return err
680         }
681         return nil
682
683 }
684
685 //Function to Check if job succeeded
686 func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
687
688         fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
689         jobName := "kud-" + clusterName
690         jobClient := clientset.BatchV1().Jobs(namespace)
691
692         for {
693                 time.Sleep(2 * time.Second)
694
695                 job, err := jobClient.Get(jobName, metav1.GetOptions{})
696                 if err != nil {
697                         fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
698                         return
699                 }
700                 jobSucceeded := job.Status.Succeeded
701                 jobFailed := job.Status.Failed
702
703                 if jobSucceeded == 1 {
704                         fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
705
706                         //KUD was installed successfully create configmap to store IP address info for the cluster
707                         err = createConfigMap(data, labels, namespace, clientset)
708                         if err != nil {
709                                 fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
710                                 return
711                         }
712                         return
713                 }
714
715                 if jobFailed == 1 {
716                         fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
717                         return
718                 }
719
720         }
721         return
722
723 }
724
725 //Function to get software list from software CR
726 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
727
728         CRclusterName := softwareCR.GetLabels()["cluster"]
729
730         masterSofwareList := softwareCR.Spec.MasterSoftware
731         workerSoftwareList := softwareCR.Spec.WorkerSoftware
732
733         return CRclusterName, masterSofwareList, workerSoftwareList
734 }
735
736 //Function to install software in clusterHosts
737 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
738
739         var installString string
740         for _, software := range softwareList {
741
742                 switch t := software.(type) {
743                 case string:
744                         installString += software.(string) + " "
745                 case interface{}:
746                         softwareMap, errBool := software.(map[string]interface{})
747                         if !errBool {
748                                 fmt.Printf("Error occured, cannot install software %v\n", software)
749                         }
750                         for softwareName, versionMap := range softwareMap {
751
752                                 versionMAP, _ := versionMap.(map[string]interface{})
753                                 version := versionMAP["version"].(string)
754                                 installString += softwareName + "=" + version + " "
755                         }
756                 default:
757                         fmt.Printf("invalid format %v\n", t)
758                 }
759
760         }
761
762         err := sshInstaller(installString, sshPrivateKey, ipAddress)
763         if err != nil {
764                 return err
765         }
766         return nil
767
768 }
769
770 //Function to Run Installation commands via ssh
771 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
772
773         buffer, err := ioutil.ReadFile(sshPrivateKey)
774         if err != nil {
775                 return err
776         }
777
778         key, err := ssh.ParsePrivateKey(buffer)
779         if err != nil {
780                 return err
781         }
782
783         sshConfig := &ssh.ClientConfig{
784                 User: "root",
785                 Auth: []ssh.AuthMethod{
786                         ssh.PublicKeys(key),
787                 },
788
789                 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
790         }
791
792         client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
793         if err != nil {
794                 return err
795         }
796
797         session, err := client.NewSession()
798         if err != nil {
799                 return err
800         }
801
802         defer session.Close()
803         defer client.Close()
804
805         cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
806         err = session.Start(cmd)
807
808         if err != nil {
809                 return err
810         }
811
812         return nil
813
814 }
815
816 func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
817
818         var vmPodList []VirtletVM
819
820         pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
821         if err != nil {
822                 fmt.Printf("Could not get pod info, Error: %v\n", err)
823                 return []VirtletVM{}, err
824         }
825
826         for _, pod := range pods.Items {
827                 var podAnnotation map[string]interface{}
828                 var podStatus corev1.PodStatus
829                 var podDefaultNetStatus []NetworksStatus
830
831                 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
832                 if err != nil {
833                         fmt.Printf("Could not get pod annotations, Error: %v\n", err)
834                         return []VirtletVM{}, err
835                 }
836
837                 json.Unmarshal([]byte(annotation), &podAnnotation)
838                 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
839                         runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
840
841                         podStatusJson, _ := json.Marshal(pod.Status)
842                         json.Unmarshal([]byte(podStatusJson), &podStatus)
843
844                         if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
845                                 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
846                                 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
847
848                                 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
849                         }
850                 }
851         }
852
853         return vmPodList, nil
854 }
855
856 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
857
858         for i := 0; i < len(vmList); i++ {
859                 if vmList[i].MACaddress == macAddress {
860                         return vmList[i].IPaddress, nil
861                 }
862         }
863         return "", nil
864 }