14 bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
15 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16 corev1 "k8s.io/api/core/v1"
17 batchv1 "k8s.io/api/batch/v1"
18 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19 "k8s.io/apimachinery/pkg/runtime/schema"
20 "k8s.io/apimachinery/pkg/api/errors"
21 "k8s.io/apimachinery/pkg/runtime"
22 "k8s.io/apimachinery/pkg/types"
23 "k8s.io/client-go/dynamic"
25 "sigs.k8s.io/controller-runtime/pkg/client"
26 "sigs.k8s.io/controller-runtime/pkg/client/config"
27 "sigs.k8s.io/controller-runtime/pkg/controller"
28 "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
29 "sigs.k8s.io/controller-runtime/pkg/handler"
30 "sigs.k8s.io/controller-runtime/pkg/manager"
31 "sigs.k8s.io/controller-runtime/pkg/reconcile"
32 logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
33 "sigs.k8s.io/controller-runtime/pkg/source"
35 "golang.org/x/crypto/ssh"
// VirtletVM pairs a VM's pod IP with its MAC address. The fields are elided
// in this excerpt, but usage below (listVirtletVMs / getVMIPaddress) shows an
// IPaddress string and a MACaddress string field.
38 type VirtletVM struct {

// NetworksStatus models one entry of the JSON array stored in the
// "k8s.v1.cni.cncf.io/networks-status" pod annotation (see listVirtletVMs),
// which reports per-network interface/IP/MAC information for a pod.
43 type NetworksStatus struct {
44 Name string `json:"name,omitempty"`
45 Interface string `json:"interface,omitempty"`
46 Ips []string `json:"ips,omitempty"`
47 Mac string `json:"mac,omitempty"`
48 Default bool `json:"default,omitempty"`
49 Dns interface{} `json:"dns,omitempty"`
// log is the package-level logger for this controller package.
52 var log = logf.Log.WithName("controller_provisioning")
55 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
56 * business logic. Delete these comments after modifying this file.*
59 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
60 // and Start it when the Manager is Started.
61 func Add(mgr manager.Manager) error {
62 return add(mgr, newReconciler(mgr))
65 // newReconciler returns a new reconcile.Reconciler
// It wires the manager's split (cache-backed) client and scheme together with
// a separate dynamic client used to list unstructured BareMetalHost CRs.
66 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
// NOTE(review): in the elided error branches below the errors appear to be
// only printed, so on failure a nil/zero bmhDynamicClient may still be
// stored in the reconciler — confirm the branches return or panic.
68 config, err := config.GetConfig()
70 fmt.Printf("Could not get kube config, Error: %v\n", err)
// Dynamic client for the BareMetalHost CRD (no typed client available here).
73 bmhDynamicClient, err := dynamic.NewForConfig(config)
76 fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
79 return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), bmhClient: bmhDynamicClient }
82 // add adds a new Controller to mgr with r as the reconcile.Reconciler
// Watches are registered for: the Provisioning CR itself, ConfigMaps and Jobs
// owned by a Provisioning CR, and the Software CR.
83 func add(mgr manager.Manager, r reconcile.Reconciler) error {
84 // Create a new controller
85 c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
90 // Watch for changes to primary resource Provisioning
91 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
96 // Watch for changes to resource configmap created as a consequence of the provisioning CR
97 err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
99 OwnerType: &bpav1alpha1.Provisioning{},
106 //Watch for changes to job resource also created as a consequence of the provisioning CR
107 err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
109 OwnerType: &bpav1alpha1.Provisioning{},
116 // Watch for changes to resource software CR
// Software CRs are enqueued by object (not by owner), and are disambiguated
// from Provisioning CRs inside Reconcile by a second Get.
117 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
126 // blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
127 var _ reconcile.Reconciler = &ReconcileProvisioning{}
129 // ReconcileProvisioning reconciles a Provisioning object
130 type ReconcileProvisioning struct {
131 // This client, initialized using mgr.Client() above, is a split client
132 // that reads objects from the cache and writes to the apiserver
// scheme is used to set owner references on created ConfigMaps/Jobs.
134 scheme *runtime.Scheme
// bmhClient is a dynamic client used only to list BareMetalHost CRs.
135 bmhClient dynamic.Interface
138 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
139 // and what is in the Provisioning.Spec
140 // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
141 // a Pod as an example
143 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
144 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
// The request may name either a Provisioning CR (KUD cluster install path) or
// a Software CR (software install path); which one exists decides the branch
// taken below via the provisioningCreated flag.
145 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
146 reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
148 reqLogger.Info("Reconciling Custom Resource")
152 // Fetch the Provisioning instance
153 provisioningInstance := &bpav1alpha1.Provisioning{}
154 softwareInstance := &bpav1alpha1.Software{}
155 err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
156 provisioningCreated := true
159 //Check if its a Software Instance
160 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
162 if errors.IsNotFound(err) {
163 // Request object not found, could have been deleted after reconcile request.
164 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
165 // Return and don't requeue
166 return reconcile.Result{}, nil
169 // Error reading the object - requeue the request.
170 return reconcile.Result{}, err
173 //No error occurred and so a Software CR was created, not a Provisioning CR
174 provisioningCreated = false
// Prefixes used as keys in the cluster IP-address ConfigMap to distinguish
// master entries from worker entries.
178 masterTag := "MASTER_"
179 workerTag := "WORKER_"
181 if provisioningCreated {
183 ///////////////////////////////////////////////////////////////////////////////////////////////
184 //////////////// Provisioning CR was created so install KUD /////////////////
185 //////////////////////////////////////////////////////////////////////////////////////////////
// ResourceVersion is stamped onto the generated ConfigMaps/Jobs so updates
// to the CR can be detected and the derived objects refreshed.
186 provisioningVersion := provisioningInstance.ResourceVersion
187 clusterName := provisioningInstance.Labels["cluster"]
188 clusterType := provisioningInstance.Labels["cluster-type"]
189 mastersList := provisioningInstance.Spec.Masters
190 workersList := provisioningInstance.Spec.Workers
// List errors are deliberately ignored here; empty lists simply cause the
// MAC lookups below to fail with "not found".
193 bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
194 virtletVMList, _ := listVirtletVMs(r.client)
// Accumulators for the kubespray-style hosts.ini sections.
199 var masterString string
200 var workerString string
202 dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
203 multiClusterDir := "/multi-cluster"
205 //Create Directory for the specific cluster
206 clusterDir := multiClusterDir + "/" + clusterName
// NOTE(review): MkdirAll error is ignored; a failure surfaces later at
// os.Create of the hosts.ini file.
207 os.MkdirAll(clusterDir, os.ModePerm)
209 //Create Maps to be used for cluster ip address to label configmap
210 clusterData := make(map[string]string)
211 clusterMACData := make(map[string]string)
215 //Iterate through mastersList and get all the mac addresses and IP addresses
216 for _, masterMap := range mastersList {
218 for masterLabel, master := range masterMap {
219 masterMAC := master.MACaddress
// A missing MAC for a master is a hard error: requeue with an error.
223 err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
224 return reconcile.Result{}, err
228 // Check if master MAC address has already been used
229 usedMAC, err := r.macAddressUsed(provisioningInstance.Namespace, masterMAC, clusterName)
234 fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
235 return reconcile.Result{}, err
240 err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", masterMAC)
241 return reconcile.Result{}, err
245 // Check if Baremetal host with specified MAC address exist
246 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
248 //Check 'cluster-type' label for Virtlet VMs
249 if clusterType == "virtlet-vm" {
250 //Get VM IP address of master
251 hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
252 if err != nil || hostIPaddress == "" {
253 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
254 return reconcile.Result{}, err
// Bare-metal path: resolve the master's IP from the DHCP lease file.
261 if clusterType != "virtlet-vm" {
262 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
264 //Get IP address of master
265 hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile )
266 if err != nil || hostIPaddress == ""{
267 err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
268 return reconcile.Result{}, err
// Append this master to the [all] section of hosts.ini.
272 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
// NOTE(review): for virtlet-vm clusters this ASSIGNS (=) rather than
// appends (+=), so each master overwrites the previous [all] content —
// looks like a bug for multi-master virtlet clusters; confirm.
273 if clusterType == "virtlet-vm" {
274 allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
276 masterString += masterLabel + "\n"
277 clusterData[masterTag + masterLabel] = hostIPaddress
// MAC keys use '-' instead of ':' because ConfigMap keys cannot contain ':'.
278 clusterMACData[strings.ReplaceAll(masterMAC, ":", "-")] = masterTag + masterLabel
280 fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
283 // Check if any worker MAC address was specified
284 if len(workersList) != 0 {
286 //Iterate through workersList and get all the mac addresses
287 for _, workerMap := range workersList {
289 //Get worker labels from the workermap
290 for workerLabel, worker := range workerMap {
292 //Check if workerString already contains worker label
293 containsWorkerLabel := strings.Contains(workerString, workerLabel)
294 workerMAC := worker.MACaddress
297 //Error occurs if the same label is given to different hosts (assumption,
298 //each MAC address represents a unique host
299 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
300 if containsWorkerLabel {
// BUG(review): strings.ReplaceAll is pure — its result is discarded, so
// workerString is never actually cleaned up. Likely intended:
// workerString = strings.ReplaceAll(workerString, workerLabel, "")
301 strings.ReplaceAll(workerString, workerLabel, "")
303 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
304 different label to node with MACAddress %s`, workerLabel, workerMAC)
305 return reconcile.Result{}, err
307 //same node performs worker and master roles
308 } else if workerLabel == masterLabel && !containsWorkerLabel {
309 workerString += workerLabel + "\n"
311 //Add host to ip address config map with worker tag
312 hostIPaddress = clusterData[masterTag + masterLabel]
313 clusterData[workerTag + masterLabel] = hostIPaddress
315 //Error occurs if the same node is given different labels
316 } else if workerLabel != masterLabel && workerMAC == masterMAC {
317 if containsWorkerLabel {
// BUG(review): same discarded-result issue as above.
318 strings.ReplaceAll(workerString, workerLabel, "")
320 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
321 different label to node with MACAddress %s`, workerLabel, workerMAC)
322 return reconcile.Result{}, err
324 //worker node is different from any master node and it has not been added to the worker list
325 } else if workerLabel != masterLabel && !containsWorkerLabel {
327 // Error occurs if MAC address not provided for worker node not matching master
329 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
330 return reconcile.Result{}, err
333 // Check if worker MAC address has already been used
334 usedMAC, err = r.macAddressUsed(provisioningInstance.Namespace, workerMAC, clusterName)
338 fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
339 return reconcile.Result{}, err
344 err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", workerMAC)
345 return reconcile.Result{}, err
349 containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
351 if clusterType == "virtlet-vm" {
352 //Get VM IP address of worker
353 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
354 if err != nil || hostIPaddress == "" {
355 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
356 return reconcile.Result{}, err
363 if clusterType != "virtlet-vm" {
364 fmt.Printf("Host %s matches that macAddress\n", bmhCR)
366 //Get IP address of worker
367 hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile )
// BUG(review): fmt.Errorf's result is discarded here (compare line 267,
// which assigns to err). The error actually returned below is whatever
// getHostIPaddress produced — likely intended `err = fmt.Errorf(...)`.
369 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
370 return reconcile.Result{}, err
373 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
375 allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
// NOTE(review): this virtlet branch both overwrites allString (= not +=)
// AND uses masterLabel for a WORKER host — both look like copy/paste bugs
// from the master loop; confirm intended behavior.
376 if clusterType == "virtlet-vm" {
377 allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
379 workerString += workerLabel + "\n"
380 clusterData[workerTag + workerLabel] = hostIPaddress
381 clusterMACData[strings.ReplaceAll(workerMAC, ":", "-")] = workerTag + workerLabel
383 //No host found that matches the worker MAC
386 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
387 return reconcile.Result{}, err
392 //No worker node specified, add master as worker node
393 } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
394 workerString += masterLabel + "\n"
396 //Add host to ip address config map with worker tag
397 hostIPaddress = clusterData[masterTag + masterLabel]
398 clusterData[workerTag + masterLabel] = hostIPaddress
401 //No host matching master MAC found
403 err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
404 return reconcile.Result{}, err
409 //Create host.ini file
410 iniHostFilePath := clusterDir + "/hosts.ini"
411 newFile, err := os.Create(iniHostFilePath)
// NOTE(review): Close is deferred before the error check; on error newFile
// is nil and the deferred Close on a nil *os.File is a no-op, so this is
// safe but unidiomatic.
412 defer newFile.Close()
416 fmt.Printf("Error occured while creating file \n %v", err)
417 return reconcile.Result{}, err
// Build the hosts.ini sections expected by KUD/kubespray.
420 hostFile, err := ini.Load(iniHostFilePath)
422 fmt.Printf("Error occured while Loading file \n %v", err)
423 return reconcile.Result{}, err
426 _, err = hostFile.NewRawSection("all", allString)
428 fmt.Printf("Error occured while creating section \n %v", err)
429 return reconcile.Result{}, err
431 _, err = hostFile.NewRawSection("kube-master", masterString)
433 fmt.Printf("Error occured while creating section \n %v", err)
434 return reconcile.Result{}, err
437 _, err = hostFile.NewRawSection("kube-node", workerString)
439 fmt.Printf("Error occured while creating section \n %v", err)
440 return reconcile.Result{}, err
443 _, err = hostFile.NewRawSection("etcd", masterString)
445 fmt.Printf("Error occured while creating section \n %v", err)
446 return reconcile.Result{}, err
449 _, err = hostFile.NewRawSection("ovn-central", masterString)
451 fmt.Printf("Error occured while creating section \n %v", err)
452 return reconcile.Result{}, err
455 _, err = hostFile.NewRawSection("ovn-controller", workerString)
457 fmt.Printf("Error occured while creating section \n %v", err)
458 return reconcile.Result{}, err
461 _, err = hostFile.NewRawSection("virtlet", workerString)
463 fmt.Printf("Error occured while creating section \n %v", err)
464 return reconcile.Result{}, err
467 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
469 fmt.Printf("Error occured while creating section \n %v", err)
470 return reconcile.Result{}, err
474 //Create host.ini file for KUD
// NOTE(review): SaveTo's error is ignored; a failed save would leave a
// stale/empty hosts.ini for the installer job.
475 hostFile.SaveTo(iniHostFilePath)
477 // Create configmap to store MAC address info for the cluster
478 cmName := provisioningInstance.Labels["cluster"] + "-mac-addresses"
479 foundConfig := &corev1.ConfigMap{}
480 err = r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: provisioningInstance.Namespace}, foundConfig)
482 // Configmap was found but the provisioning CR was updated so update configMap
483 if err == nil && foundConfig.Labels["provisioning-version"] != provisioningVersion {
485 foundConfig.Data = clusterMACData
486 foundConfig.Labels = provisioningInstance.Labels
487 foundConfig.Labels["configmap-type"] = "mac-address"
488 foundConfig.Labels["provisioning-version"] = provisioningVersion
489 err = r.client.Update(context.TODO(), foundConfig)
491 fmt.Printf("Error occured while updating mac address configmap for provisioningCR %s\n ERROR: %v\n", provisioningInstance.Name,
493 return reconcile.Result{}, err
// ConfigMap does not exist yet: create it with type/version labels.
496 } else if err != nil && errors.IsNotFound(err) {
497 labels := provisioningInstance.Labels
498 labels["configmap-type"] = "mac-address"
499 labels["provisioning-version"] = provisioningVersion
500 err = r.createConfigMap(provisioningInstance, clusterMACData, labels, cmName)
502 fmt.Printf("Error occured while creating MAC address configmap for cluster %v\n ERROR: %v", clusterName, err)
503 return reconcile.Result{}, err
506 } else if err != nil {
507 fmt.Printf("ERROR occured in Create MAC address Config map section: %v\n", err)
508 return reconcile.Result{}, err
// Launch the KUD installer Job for this cluster.
512 err = r.createKUDinstallerJob(provisioningInstance)
514 fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
515 return reconcile.Result{}, err
518 //Start separate thread to keep checking job status, Create an IP address configmap
519 //for cluster if KUD is successfully installed
// NOTE(review): this goroutine has no cancellation/lifetime management; a
// deleted CR or controller shutdown will not stop it.
520 go r.checkJob(provisioningInstance, clusterData)
522 return reconcile.Result{}, nil
528 ///////////////////////////////////////////////////////////////////////////////////////////////
529 //////////////// Software CR was created so install software /////////////////
530 //////////////////////////////////////////////////////////////////////////////////////////////
531 softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
532 defaultSSHPrivateKey := "/root/.ssh/id_rsa"
534 //Get IP address configmap for the cluster
535 configmapName := softwareInstance.Labels["cluster"] + "-configmap"
536 clusterConfigMapData, err := r.getConfigMapData(softwareInstance.Namespace, configmapName)
538 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
539 return reconcile.Result{}, err
// Install the appropriate list on each host based on its MASTER_/WORKER_ tag.
// NOTE(review): install errors are only printed, not returned — a failed
// install does not requeue the Software CR; confirm this is intentional.
542 for hostLabel, ipAddress := range clusterConfigMapData {
544 if strings.Contains(hostLabel, masterTag) {
545 // Its a master node, install master software
546 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
548 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
550 } else if strings.Contains(hostLabel, workerTag) {
551 // Its a worker node, install worker software
552 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
554 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
561 return reconcile.Result{}, nil
564 //Function to Get List containing baremetal hosts
// Lists all BareMetalHost CRs via the dynamic client; returns an empty
// (non-nil) list together with the error on failure.
565 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
567 //Create GVR representing a BareMetalHost CR
568 bmhGVR := schema.GroupVersionResource{
571 Resource: "baremetalhosts",
574 //Get List containing all BareMetalHosts CRs
// Cluster-scoped/all-namespaces list (no Namespace() qualifier).
575 bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
577 fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
578 return &unstructured.UnstructuredList{}, err
581 return bareMetalHosts, nil
585 //Function to check if BareMetalHost containing MAC address exist
// Returns (true, hostName) for the first BareMetalHost whose serialized JSON
// contains macAddress as a substring, otherwise (false, ...). NOTE(review):
// substring matching over the whole CR JSON can false-positive if the MAC
// string appears in any unrelated field.
586 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
588 //Convert macAddress to byte array for comparison
589 macAddressByte := []byte(macAddress)
592 for _, bareMetalHost := range bareMetalHostList.Items {
// MarshalJSON error ignored; an empty bmhJson simply fails the Contains test.
593 bmhJson, _ := bareMetalHost.MarshalJSON()
595 macBool = bytes.Contains(bmhJson, macAddressByte)
597 return macBool, bareMetalHost.GetName()
606 //Function to check if MAC address has been used in another provisioning CR
607 //Returns true if MAC address has already been used
// Scans all ConfigMaps labeled configmap-type=mac-address belonging to OTHER
// clusters and reports whether macAddress (with ':' replaced by '-', the key
// form used when the ConfigMaps were written) appears as a data key.
608 func (r *ReconcileProvisioning) macAddressUsed(namespace, macAddress, provisioningCluster string) (bool, error) {
610 macKey := strings.ReplaceAll(macAddress, ":", "-")
613 cmList := &corev1.ConfigMapList{}
614 label := map[string]string{"configmap-type": "mac-address"}
615 listOpts := client.MatchingLabels(label)
617 err := r.client.List(context.TODO(), listOpts, cmList)
622 for _, configmap := range cmList.Items {
// Skip ConfigMaps of the cluster currently being provisioned.
624 cmCluster := configmap.Labels["cluster"]
625 if cmCluster != provisioningCluster {
626 cmData, err := r.getConfigMapData(namespace, configmap.Name)
632 if _, exist := cmData[macKey]; exist {
646 //Function to get the IP address of a host from the DHCP file
// Parses an ISC dhcpd leases file and returns the IP of the most recent
// ACTIVE lease whose hardware (MAC) address equals macAddress.
647 func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, error) {
649 //Read the dhcp lease file
650 dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
652 fmt.Printf("Failed to read lease file\n")
656 dhcpLeases := string(dhcpFile)
658 //Regex to use to search dhcpLeases
// Matches "lease <ip> {", "ethernet <mac>" and "  binding state <state>"
// lines so that, after the replacements below, tokens line up as
// [... ip, state, mac ...] triples in ipMacList.
659 reg := "lease.*{|ethernet.*|\n. binding state.*"
660 re, err := regexp.Compile(reg)
662 fmt.Printf("Could not create Regexp object, Error %v occured\n", err)
666 //Get String containing leased Ip addresses and Corressponding MAC addresses
667 out := re.FindAllString(dhcpLeases, -1)
668 outString := strings.Join(out, " ")
669 stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
670 " binding state", "", "{", "")
671 replaced := stringReplacer.Replace(outString)
672 ipMacList := strings.Fields(replaced)
675 //Get IP addresses corresponding to Input MAC Address
// Iterate backwards so the most recent lease for the MAC wins.
676 for idx := len(ipMacList)-1 ; idx >= 0; idx -- {
677 item := ipMacList[idx]
678 if item == macAddress {
// Token layout per lease: [ip, state, mac]; idx-1 is the binding state.
680 leaseState := ipMacList[idx -1]
681 if leaseState != "active" {
682 err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
683 fmt.Printf("%v\n", err)
// idx-2 is the leased IP address for this MAC.
686 ipAdd := ipMacList[idx - 2]
694 //Function to create configmap
// Creates a ConfigMap named cmName in the Provisioning CR's namespace with
// the supplied data and labels, owned by the Provisioning CR so it is
// garbage-collected with it.
695 func (r *ReconcileProvisioning) createConfigMap(p *bpav1alpha1.Provisioning, data, labels map[string]string, cmName string) error{
697 // Configmap has not been created, create it
698 configmap := &corev1.ConfigMap{
700 ObjectMeta: metav1.ObjectMeta{
702 Namespace: p.Namespace,
709 // Set provisioning instance as the owner of the job
// NOTE(review): SetControllerReference's error is ignored; if it fails the
// ConfigMap is created without an owner ref and will leak on CR deletion.
710 controllerutil.SetControllerReference(p, configmap, r.scheme)
711 err := r.client.Create(context.TODO(), configmap)
721 //Function to get configmap Data
// Fetches the named ConfigMap in namespace and returns its Data map.
722 func (r *ReconcileProvisioning) getConfigMapData(namespace, configmapName string) (map[string]string, error) {
724 clusterConfigmap := &corev1.ConfigMap{}
725 err := r.client.Get(context.TODO(), types.NamespacedName{Name: configmapName, Namespace: namespace}, clusterConfigmap)
730 configmapData := clusterConfigmap.Data
731 return configmapData, nil
734 //Function to create job for KUD installation
// Creates (or recreates, when the provisioning-version label is stale) the
// "kud-<cluster>" Job that runs the KUD installer container against the
// hosts.ini generated by Reconcile.
735 func (r *ReconcileProvisioning) createKUDinstallerJob(p *bpav1alpha1.Provisioning) error{
// No retries: a failed installer pod marks the Job failed immediately.
737 var backOffLimit int32 = 0
// Installer container runs privileged (mounts host paths, runs ansible/ssh).
738 var privi bool = true
741 kudPlugins := p.Spec.KUDPlugins
744 jobLabel["provisioning-version"] = p.ResourceVersion
745 jobName := "kud-" + jobLabel["cluster"]
747 // Check if the job already exist
748 foundJob := &batchv1.Job{}
749 err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, foundJob)
// Proceed when the Job is missing, or exists but was built from an older
// version of the Provisioning CR (delete then recreate).
751 if (err == nil && foundJob.Labels["provisioning-version"] != jobLabel["provisioning-version"]) || (err != nil && errors.IsNotFound(err)) {
753 // If err == nil and its in this statement, then provisioning CR was updated, delete job
// Foreground propagation so dependent pods are removed before recreation.
755 err = r.client.Delete(context.TODO(), foundJob, client.PropagationPolicy(metav1.DeletePropagationForeground))
757 fmt.Printf("Error occured while deleting kud install job for updated provisioning CR %v\n", err)
762 // Job has not been created, create a new kud job
763 installerString := " ./installer --cluster " + p.Labels["cluster"]
765 // Check if any plugin was specified
766 if len(kudPlugins) > 0 {
767 plugins := " --plugins"
769 for _, plug := range kudPlugins {
770 plugins += " " + plug
773 installerString += plugins
779 ObjectMeta: metav1.ObjectMeta{
781 Namespace: p.Namespace,
784 Spec: batchv1.JobSpec{
785 Template: corev1.PodTemplateSpec{
786 ObjectMeta: metav1.ObjectMeta{
791 Spec: corev1.PodSpec{
793 Containers: []corev1.Container{{
795 Image: "github.com/onap/multicloud-k8s:latest",
796 ImagePullPolicy: "IfNotPresent",
797 VolumeMounts: []corev1.VolumeMount{{
798 Name: "multi-cluster",
799 MountPath: "/opt/kud/multi-cluster",
802 Name: "secret-volume",
// The SSH key secret is copied into /root/.ssh and tightened to 0600
// before the installer runs.
807 Command: []string{"/bin/sh","-c"},
808 Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
809 SecurityContext: &corev1.SecurityContext{
815 Volumes: []corev1.Volume{{
816 Name: "multi-cluster",
817 VolumeSource: corev1.VolumeSource{
818 HostPath: &corev1.HostPathVolumeSource{
819 Path : "/opt/kud/multi-cluster",
822 Name: "secret-volume",
823 VolumeSource: corev1.VolumeSource{
824 Secret: &corev1.SecretVolumeSource{
825 SecretName: "ssh-key-secret",
829 RestartPolicy: "Never",
833 BackoffLimit : &backOffLimit,
838 // Set provisioning instance as the owner and controller of the job
// NOTE(review): SetControllerReference's error is ignored here as well.
839 controllerutil.SetControllerReference(p, job, r.scheme)
840 err = r.client.Create(context.TODO(), job)
842 fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
846 } else if err != nil {
855 //Function to Check if job succeeded
// Runs as a goroutine (see Reconcile): polls the kud-<cluster> Job every 5s
// and, on success, creates/updates the "<cluster>-configmap" holding the
// host-label -> IP address map. NOTE(review): no context/stop signal is
// visible, so this loop's termination conditions cannot be confirmed from
// this excerpt.
856 func (r *ReconcileProvisioning) checkJob(p *bpav1alpha1.Provisioning, data map[string]string) {
858 clusterName := p.Labels["cluster"]
859 fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
860 jobName := "kud-" + clusterName
861 job := &batchv1.Job{}
865 err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, job)
867 fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
870 jobSucceeded := job.Status.Succeeded
871 jobFailed := job.Status.Failed
873 if jobSucceeded == 1 {
874 fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
876 //KUD was installed successfully create configmap to store IP address info for the cluster
878 labels["provisioning-version"] = p.ResourceVersion
879 cmName := labels["cluster"] + "-configmap"
880 foundConfig := &corev1.ConfigMap{}
881 err := r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: p.Namespace}, foundConfig)
883 // Check if provisioning CR was updated
884 if err == nil && foundConfig.Labels["provisioning-version"] != labels["provisioning-version"] {
886 foundConfig.Data = data
887 foundConfig.Labels = labels
888 err = r.client.Update(context.TODO(), foundConfig)
890 fmt.Printf("Error occured while updating IP address configmap for provisioningCR %s\n ERROR: %v\n", p.Name, err)
894 } else if err != nil && errors.IsNotFound(err) {
895 err = r.createConfigMap(p, data, labels, cmName)
897 fmt.Printf("Error occured while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
902 } else if err != nil {
903 fmt.Printf("ERROR occured while checking if IP address configmap %v already exists: %v\n", cmName, err)
// jobFailed path: report failure (read via jobFailed above).
912 fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
// Poll interval between Job status checks.
917 time.Sleep(5 * time.Second)
922 //Function to get software list from software CR
// Returns the CR's cluster label plus its master and worker software lists
// (heterogeneous: entries may be plain strings or name->{version} maps,
// see softwareInstaller).
923 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
925 CRclusterName := softwareCR.GetLabels()["cluster"]
927 masterSofwareList := softwareCR.Spec.MasterSoftware
928 workerSoftwareList := softwareCR.Spec.WorkerSoftware
930 return CRclusterName, masterSofwareList, workerSoftwareList
933 //Function to install software in clusterHosts
// Builds a single space-separated "pkg pkg=version ..." string from the
// mixed-type softwareList and hands it to sshInstaller for apt installation
// on the host at ipAddress.
934 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
936 var installString string
937 for _, software := range softwareList {
939 switch t := software.(type){
// Plain string entry: install the package at its default version.
941 installString += software.(string) + " "
// Map entry: softwareName -> {"version": <string>}; pinned install.
943 softwareMap, errBool := software.(map[string]interface{})
945 fmt.Printf("Error occured, cannot install software %v\n", software)
947 for softwareName, versionMap := range softwareMap {
// NOTE(review): the inner assertions ignore failure and will panic if
// "version" is absent or not a string — confirm CRD validation guarantees it.
949 versionMAP, _ := versionMap.(map[string]interface{})
950 version := versionMAP["version"].(string)
951 installString += softwareName + "=" + version + " "
954 fmt.Printf("invalid format %v\n", t)
959 err := sshInstaller(installString, sshPrivateKey, ipAddress)
967 //Function to Run Installation commands via ssh
// Connects to ipAddress:22 with the given private key and runs apt-get to
// install softwareString.
968 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
970 buffer, err := ioutil.ReadFile(sshPrivateKey)
975 key, err := ssh.ParsePrivateKey(buffer)
980 sshConfig := &ssh.ClientConfig{
982 Auth: []ssh.AuthMethod{
// NOTE(review): host key verification is disabled — acceptable only on a
// trusted provisioning network; flagging for awareness.
986 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
989 client, err := ssh.Dial("tcp", ipAddress + ":22", sshConfig)
994 session, err := client.NewSession()
999 defer session.Close()
1000 defer client.Close()
// softwareString carries a trailing space, which is what separates the last
// package name from "-y" here — fragile; and NOTE(review): session.Start
// does not wait for the command; no session.Wait() is visible in this
// excerpt, so completion/exit status may never be checked.
1002 cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
1003 err = session.Start(cmd)
// listVirtletVMs lists all pods and returns those that are Virtlet VMs
// (target-runtime annotation "virtlet.cloud", phase Running), pairing each
// pod's IP with the MAC from its networks-status annotation.
1015 func listVirtletVMs(vmClient client.Client) ([]VirtletVM, error) {
1017 var vmPodList []VirtletVM
1019 pods := &corev1.PodList{}
1020 err := vmClient.List(context.TODO(), &client.ListOptions{}, pods)
1022 fmt.Printf("Could not get pod info, Error: %v\n", err)
1023 return []VirtletVM{}, err
1026 for _, pod := range pods.Items {
1027 var podAnnotation map[string]interface{}
1028 var podStatus corev1.PodStatus
1029 var podDefaultNetStatus []NetworksStatus
// Round-trip the annotations through JSON to get a generic map.
1031 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
1033 fmt.Printf("Could not get pod annotations, Error: %v\n", err)
1034 return []VirtletVM{}, err
1037 json.Unmarshal([]byte(annotation), &podAnnotation)
1038 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
1039 runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
1041 podStatusJson, _ := json.Marshal(pod.Status)
1042 json.Unmarshal([]byte(podStatusJson), &podStatus)
1044 if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
1045 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
1046 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
// NOTE(review): podDefaultNetStatus[0] is indexed without a length check;
// an empty/unparseable networks-status annotation would panic here. The
// Unmarshal errors above are also ignored.
1048 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
1053 return vmPodList, nil
1056 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
1058 for i := 0; i < len(vmList); i++ {
1059 if vmList[i].MACaddress == macAddress {
1060 return vmList[i].IPaddress, nil