14 bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
15 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
16 corev1 "k8s.io/api/core/v1"
17 batchv1 "k8s.io/api/batch/v1"
18 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19 "k8s.io/apimachinery/pkg/runtime/schema"
20 "k8s.io/apimachinery/pkg/api/errors"
21 "k8s.io/apimachinery/pkg/runtime"
22 "k8s.io/client-go/dynamic"
23 "k8s.io/client-go/rest"
25 "k8s.io/client-go/kubernetes"
26 "sigs.k8s.io/controller-runtime/pkg/client"
27 "sigs.k8s.io/controller-runtime/pkg/client/config"
28 "sigs.k8s.io/controller-runtime/pkg/controller"
29 "sigs.k8s.io/controller-runtime/pkg/handler"
30 "sigs.k8s.io/controller-runtime/pkg/manager"
31 "sigs.k8s.io/controller-runtime/pkg/reconcile"
32 logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
33 "sigs.k8s.io/controller-runtime/pkg/source"
35 "golang.org/x/crypto/ssh"
// VirtletVM associates a Virtlet VM pod with one of its NIC MAC addresses.
// The field declarations are elided from this excerpt; usage in
// listVirtletVMs and getVMIPaddress indicates string fields IPaddress and
// MACaddress — TODO confirm against the full file.
38 type VirtletVM struct {
// NetworksStatus mirrors one entry of a Multus-style network-status pod
// annotation. listVirtletVMs unmarshals the
// "v1.multus-cni.io/default-network" annotation into a []NetworksStatus to
// read the MAC address of a VM's default network interface.
43 type NetworksStatus struct {
44 Name string `json:"name,omitempty"`
45 Interface string `json:"interface,omitempty"`
46 Ips []string `json:"ips,omitempty"`
47 Mac string `json:"mac,omitempty"`
48 Default bool `json:"default,omitempty"`
49 Dns interface{} `json:"dns,omitempty"`
// Package-level logger for this controller; Reconcile derives a
// request-scoped logger from it on every invocation.
52 var log = logf.Log.WithName("controller_provisioning")
55 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
56 * business logic. Delete these comments after modifying this file.*
59 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
60 // and Start it when the Manager is Started.
// Standard operator-sdk scaffolding entry point: wires newReconciler's
// reconciler into the controller registered by add.
61 func Add(mgr manager.Manager) error {
62 return add(mgr, newReconciler(mgr))
65 // newReconciler returns a new reconcile.Reconciler
// It injects the manager's (cache-backed) client and scheme into a
// ReconcileProvisioning value.
66 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
67 return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()}
70 // add adds a new Controller to mgr with r as the reconcile.Reconciler
// Watches are set up for: Provisioning (primary), ConfigMap and Job (owned
// by a Provisioning CR, enqueued via EnqueueRequestForOwner), and Software.
// The per-watch error checks are elided from this excerpt.
71 func add(mgr manager.Manager, r reconcile.Reconciler) error {
72 // Create a new controller
73 c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
78 // Watch for changes to primary resource Provisioning
79 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
84 // Watch for changes to resource configmap created as a consequence of the provisioning CR
85 err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
87 OwnerType: &bpav1alpha1.Provisioning{},
94 //Watch for changes to job resource also created as a consequence of the provisioning CR
95 err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
97 OwnerType: &bpav1alpha1.Provisioning{},
104 // Watch for changes to resource software CR
// NOTE(review): Software CRs are watched by the same controller, which is
// why Reconcile below has to probe which CR kind a request refers to.
105 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
114 // blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
115 var _ reconcile.Reconciler = &ReconcileProvisioning{}
117 // ReconcileProvisioning reconciles a Provisioning object
118 type ReconcileProvisioning struct {
119 // This client, initialized using mgr.Client() above, is a split client
120 // that reads objects from the cache and writes to the apiserver
// (the client field declaration itself is elided from this excerpt)
// scheme is the runtime scheme carrying the registered API types.
122 scheme *runtime.Scheme
125 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
126 // and what is in the Provisioning.Spec
127 // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
128 // a Pod as an example
130 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
131 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
//
// This reconciler serves two CR kinds under one request key: it first
// tries to fetch a Provisioning CR with the request's NamespacedName and,
// failing that, a Software CR with the same name, then branches on which
// one exists (provisioningCreated). Several error-check lines are elided
// from this excerpt.
132 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
133 reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
134 //reqLogger.Info("Reconciling Provisioning")
136 reqLogger.Info("Reconciling Custom Resource")
140 // Fetch the Provisioning instance
141 provisioningInstance := &bpav1alpha1.Provisioning{}
142 softwareInstance := &bpav1alpha1.Software{}
143 err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
144 provisioningCreated := true
// Reaching here means the Provisioning Get failed (its error branch is
// elided in this excerpt); probe for a Software CR instead.
147 //Check if its a Software Instance
148 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
150 if errors.IsNotFound(err) {
151 // Request object not found, could have been deleted after reconcile request.
152 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
153 // Return and don't requeue
154 return reconcile.Result{}, nil
157 // Error reading the object - requeue the request.
158 return reconcile.Result{}, err
161 //No error occured and so a Software CR was created not a Provisoning CR
162 provisioningCreated = false
// Key prefixes used in the per-cluster configmap to distinguish master
// host entries from worker host entries.
166 masterTag := "MASTER_"
167 workerTag := "WORKER_"
// NOTE(review): this local "config" shadows the imported
// sigs.k8s.io/controller-runtime/pkg/client/config package.
169 config, err := config.GetConfig()
171 fmt.Printf("Could not get kube config, Error: %v\n", err)
172 return reconcile.Result{}, err
175 clientset, err := kubernetes.NewForConfig(config)
177 fmt.Printf("Could not create clientset, Error: %v\n", err)
178 return reconcile.Result{}, err
180 if provisioningCreated {
182 ///////////////////////////////////////////////////////////////////////////////////////////////
183 //////////////// Provisioning CR was created so install KUD /////////////////
184 //////////////////////////////////////////////////////////////////////////////////////////////
185 clusterName := provisioningInstance.Labels["cluster"]
186 clusterType := provisioningInstance.Labels["cluster-type"]
187 mastersList := provisioningInstance.Spec.Masters
188 workersList := provisioningInstance.Spec.Workers
189 dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile
190 kudInstallerScript := provisioningInstance.Spec.KUDInstaller
191 multiClusterDir := provisioningInstance.Spec.MultiClusterPath
// Candidate hosts: all BareMetalHost CRs and all Virtlet VM pods.
// NOTE(review): both lookup errors are deliberately discarded here.
194 bareMetalHostList, _ := listBareMetalHosts(config)
195 virtletVMList, _ := listVirtletVMs()
198 var masterString string
199 var workerString string
201 defaultDHCPFile := "/var/lib/dhcp/dhcpd.leases"
202 defaultKUDInstallerPath := "/multicloud-k8s/kud/hosting_providers/vagrant"
203 defaultMultiClusterDir := "/multi-cluster"
205 //Give Default values for paths if no path is given in the CR
206 if dhcpLeaseFile == "" {
207 dhcpLeaseFile = defaultDHCPFile
210 if kudInstallerScript == "" {
211 kudInstallerScript = defaultKUDInstallerPath
214 if multiClusterDir == "" {
215 multiClusterDir = defaultMultiClusterDir
218 //Create Directory for the specific cluster
219 clusterDir := multiClusterDir + "/" + clusterName
// NOTE(review): MkdirAll error is ignored.
220 os.MkdirAll(clusterDir, os.ModePerm)
222 //Create Maps to be used for cluster ip address to label configmap
223 clusterLabel := make(map[string]string)
224 clusterLabel["cluster"] = clusterName
225 clusterData := make(map[string]string)
229 //Iterate through mastersList and get all the mac addresses and IP addresses
230 for _, masterMap := range mastersList {
232 for masterLabel, master := range masterMap {
233 masterMAC := master.MACaddress
// A master entry without a MAC address is a hard error (the guarding
// if-line is elided from this excerpt).
237 err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
238 return reconcile.Result{}, err
241 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
243 //Check 'cluster-type' label for Virtlet VMs
244 if clusterType == "virtlet-vm" {
245 //Get VM IP address of master
246 hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
247 if err != nil || hostIPaddress == "" {
248 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
249 return reconcile.Result{}, err
// Bare-metal path: resolve the master's IP from the DHCP lease file.
256 if clusterType != "virtlet-vm" {
257 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
259 //Get IP address of master
260 hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile )
261 if err != nil || hostIPaddress == ""{
262 err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
263 return reconcile.Result{}, err
// Accumulate Ansible inventory lines and the configmap entry for
// this master.
267 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
268 masterString += masterLabel + "\n"
269 clusterData[masterTag + masterLabel] = hostIPaddress
271 fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
273 if len(workersList) != 0 {
275 //Iterate through workersList and get all the mac addresses
276 for _, workerMap := range workersList {
278 //Get worker labels from the workermap
279 for workerLabel, worker := range workerMap {
281 //Check if workerString already contains worker label
282 containsWorkerLabel := strings.Contains(workerString, workerLabel)
283 workerMAC := worker.MACaddress
286 //Error occurs if the same label is given to different hosts (assumption,
287 //each MAC address represents a unique host
288 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
289 if containsWorkerLabel {
// NOTE(review): strings.ReplaceAll's result is discarded, so this
// statement has no effect on workerString.
290 strings.ReplaceAll(workerString, workerLabel, "")
292 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
293 different label to node with MACAddress %s`, workerLabel, workerMAC)
294 return reconcile.Result{}, err
296 //same node performs worker and master roles
297 } else if workerLabel == masterLabel && !containsWorkerLabel {
298 workerString += workerLabel + "\n"
300 //Add host to ip address config map with worker tag
301 hostIPaddress = clusterData[masterTag + masterLabel]
302 clusterData[workerTag + masterLabel] = hostIPaddress
304 //Error occurs if the same node is given different labels
305 } else if workerLabel != masterLabel && workerMAC == masterMAC {
306 if containsWorkerLabel {
// NOTE(review): result discarded here as well (no-op).
307 strings.ReplaceAll(workerString, workerLabel, "")
309 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
310 different label to node with MACAddress %s`, workerLabel, workerMAC)
311 return reconcile.Result{}, err
313 //worker node is different from any master node and it has not been added to the worker list
314 } else if workerLabel != masterLabel && !containsWorkerLabel {
316 // Error occurs if MAC address not provided for worker node not matching master
318 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
319 return reconcile.Result{}, err
322 containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
324 if clusterType == "virtlet-vm" {
325 //Get VM IP address of master
326 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
327 if err != nil || hostIPaddress == "" {
328 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
329 return reconcile.Result{}, err
336 if clusterType != "virtlet-vm" {
337 fmt.Printf("Host %s matches that macAddress\n", bmhCR)
339 //Get IP address of worker
340 hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile )
// NOTE(review): unlike the master path, this fmt.Errorf result is
// discarded — the returned err is whatever getHostIPaddress produced,
// and the intended message is lost.
342 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
343 return reconcile.Result{}, err
346 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
349 allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
350 workerString += workerLabel + "\n"
351 clusterData[workerTag + workerLabel] = hostIPaddress
353 //No host found that matches the worker MAC
356 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
357 return reconcile.Result{}, err
362 //No worker node specified, add master as worker node
363 } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
364 workerString += masterLabel + "\n"
366 //Add host to ip address config map with worker tag
367 hostIPaddress = clusterData[masterTag + masterLabel]
368 clusterData[workerTag + masterLabel] = hostIPaddress
371 //No host matching master MAC found
373 err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
374 return reconcile.Result{}, err
// Inventory generation: write an empty hosts.ini for this cluster, then
// fill it with raw sections (all / kube-master / kube-node / etcd, plus
// ovn-* sections for virtlet clusters) via the ini library.
379 //Create host.ini file
380 //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
381 iniHostFilePath := clusterDir + "/hosts.ini"
382 newFile, err := os.Create(iniHostFilePath)
383 defer newFile.Close()
387 fmt.Printf("Error occured while creating file \n %v", err)
388 return reconcile.Result{}, err
391 hostFile, err := ini.Load(iniHostFilePath)
393 fmt.Printf("Error occured while Loading file \n %v", err)
394 return reconcile.Result{}, err
397 _, err = hostFile.NewRawSection("all", allString)
399 fmt.Printf("Error occured while creating section \n %v", err)
400 return reconcile.Result{}, err
402 _, err = hostFile.NewRawSection("kube-master", masterString)
404 fmt.Printf("Error occured while creating section \n %v", err)
405 return reconcile.Result{}, err
408 _, err = hostFile.NewRawSection("kube-node", workerString)
410 fmt.Printf("Error occured while creating section \n %v", err)
411 return reconcile.Result{}, err
414 _, err = hostFile.NewRawSection("etcd", masterString)
416 fmt.Printf("Error occured while creating section \n %v", err)
417 return reconcile.Result{}, err
420 if clusterType == "virtlet-vm" {
421 _, err = hostFile.NewRawSection("ovn-central", masterString)
423 fmt.Printf("Error occured while creating section \n %v", err)
424 return reconcile.Result{}, err
426 _, err = hostFile.NewRawSection("ovn-controller", masterString)
428 fmt.Printf("Error occured while creating section \n %v", err)
429 return reconcile.Result{}, err
433 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
435 fmt.Printf("Error occured while creating section \n %v", err)
436 return reconcile.Result{}, err
440 //Create host.ini file for KUD
// NOTE(review): SaveTo's error is ignored; a failed write would still
// proceed to launch the installer job.
441 hostFile.SaveTo(iniHostFilePath)
// Launch the KUD installer as a Kubernetes Job, then watch it from a
// goroutine that creates the IP-address configmap on success.
444 err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, clientset)
446 fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
447 return reconcile.Result{}, err
450 //Start separate thread to keep checking job status, Create an IP address configmap
451 //for cluster if KUD is successfully installed
452 go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, clientset)
454 return reconcile.Result{}, nil
460 ///////////////////////////////////////////////////////////////////////////////////////////////
461 //////////////// Software CR was created so install software /////////////////
462 //////////////////////////////////////////////////////////////////////////////////////////////
463 softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
464 defaultSSHPrivateKey := "/root/.ssh/id_rsa"
466 //Get IP address configmap for the cluster
467 clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, clientset)
469 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
470 return reconcile.Result{}, err
// Install the appropriate software list on every host recorded in the
// cluster configmap, keyed by the MASTER_/WORKER_ label prefix.
473 for hostLabel, ipAddress := range clusterConfigMapData {
475 if strings.Contains(hostLabel, masterTag) {
476 // Its a master node, install master software
477 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
479 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
481 } else if strings.Contains(hostLabel, workerTag) {
482 // Its a worker node, install worker software
483 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
485 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
492 return reconcile.Result{}, nil
495 //Function to Get List containing baremetal hosts
// listBareMetalHosts lists all BareMetalHost CRs in the cluster through a
// dynamic client (the Group/Version fields of the GVR are elided from this
// excerpt). On any failure it returns an empty list plus the error.
496 func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, error) {
498 //Create Dynamic Client for BareMetalHost CRD
499 bmhDynamicClient, err := dynamic.NewForConfig(config)
// NOTE(review): fmt.Println does not interpret the %v verb — these two
// messages should use fmt.Printf.
502 fmt.Println("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
503 return &unstructured.UnstructuredList{}, err
506 //Create GVR representing a BareMetalHost CR
507 bmhGVR := schema.GroupVersionResource{
510 Resource: "baremetalhosts",
513 //Get List containing all BareMetalHosts CRs
514 bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
516 fmt.Println("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
517 return &unstructured.UnstructuredList{}, err
520 return bareMetalHosts, nil
524 //Function to check if BareMetalHost containing MAC address exist
// checkMACaddress reports whether any BareMetalHost CR's JSON serialization
// contains macAddress as a substring, and if so the matching CR's name.
// NOTE(review): a raw byte-substring match can false-positive if the MAC
// string appears anywhere else in the CR JSON.
525 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
527 //Convert macAddress to byte array for comparison
528 macAddressByte := []byte(macAddress)
531 for _, bareMetalHost := range bareMetalHostList.Items {
532 bmhJson, _ := bareMetalHost.MarshalJSON()
534 macBool = bytes.Contains(bmhJson, macAddressByte)
// Return on the first CR whose JSON contains the MAC (the guarding
// condition line is elided from this excerpt).
536 return macBool, bareMetalHost.GetName()
546 //Function to get the IP address of a host from the DHCP file
// getHostIPaddress scans an ISC dhcpd lease file for the lease assigned to
// macAddress. It extracts "lease <ip> {", "ethernet <mac>;" and
// "binding state <state>;" tokens with a regex, flattens them into a token
// list, then walks the list backwards (most recent lease first). Only a
// lease whose binding state is "active" yields an IP.
547 func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, error) {
549 //Read the dhcp lease file
550 dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
552 fmt.Println("Failed to read lease file\n")
556 dhcpLeases := string(dhcpFile)
558 //Regex to use to search dhcpLeases
559 reg := "lease.*{|ethernet.*|\n. binding state.*"
560 re, err := regexp.Compile(reg)
// NOTE(review): fmt.Println does not interpret %v — should be Printf.
562 fmt.Println("Could not create Regexp object, Error %v occured\n", err)
566 //Get String containing leased Ip addresses and Corressponding MAC addresses
567 out := re.FindAllString(dhcpLeases, -1)
568 outString := strings.Join(out, " ")
// Strip keywords/punctuation so the remaining whitespace-separated fields
// form repeating (ip, state, mac) style triples.
569 stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
570 " binding state", "", "{", "")
571 replaced := stringReplacer.Replace(outString)
572 ipMacList := strings.Fields(replaced)
575 //Get IP addresses corresponding to Input MAC Address
576 for idx := len(ipMacList)-1 ; idx >= 0; idx -- {
577 item := ipMacList[idx]
578 if item == macAddress {
// Token layout assumption: [idx-2]=IP, [idx-1]=binding state,
// [idx]=MAC — TODO confirm against a real dhcpd.leases file.
580 leaseState := ipMacList[idx -1]
581 if leaseState != "active" {
582 err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
583 fmt.Printf("%v\n", err)
586 ipAdd := ipMacList[idx - 2]
594 //Function to create configmap
// createConfigMap creates a ConfigMap named "<cluster>-configmap" in the
// given namespace, storing the host-label -> IP-address map built during
// provisioning (Data/Labels wiring partly elided from this excerpt).
595 func createConfigMap(data, labels map[string]string, namespace string, clientset *kubernetes.Clientset) error{
597 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
599 configmap := &corev1.ConfigMap{
601 ObjectMeta: metav1.ObjectMeta{
602 Name: labels["cluster"] + "-configmap",
609 _, err := configmapClient.Create(configmap)
618 //Function to get configmap Data
// getConfigMapData fetches the "<clusterName>-configmap" ConfigMap from the
// given namespace and returns its Data map (the error branch of the Get is
// elided from this excerpt).
619 func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clientset) (map[string]string, error) {
621 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
622 configmapName := clusterName + "-configmap"
623 clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
628 configmapData := clusterConfigmap.Data
629 return configmapData, nil
632 //Function to create job for KUD installation
// createKUDinstallerJob creates a Job named "kud-<clusterName>" that runs
// the multicloud-k8s installer image in a privileged container, mounting
// the host's /opt/kud/multi-cluster directory and an "ssh-key-secret"
// secret volume. BackoffLimit 0 means no retries on failure.
633 func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset *kubernetes.Clientset) error{
635 var backOffLimit int32 = 0
636 var privi bool = true
// NOTE(review): the Job is always created in the "default" namespace,
// ignoring the namespace parameter — checkJob later polls Jobs(namespace),
// so a non-default namespace would never find this Job.
639 jobClient := clientset.BatchV1().Jobs("default")
643 ObjectMeta: metav1.ObjectMeta{
644 Name: "kud-" + clusterName,
647 Spec: batchv1.JobSpec{
648 Template: corev1.PodTemplateSpec{
649 ObjectMeta: metav1.ObjectMeta{
654 Spec: corev1.PodSpec{
656 Containers: []corev1.Container{{
658 Image: "github.com/onap/multicloud-k8s:latest",
659 ImagePullPolicy: "IfNotPresent",
660 VolumeMounts: []corev1.VolumeMount{{
661 Name: "multi-cluster",
662 MountPath: "/opt/kud/multi-cluster",
665 Name: "secret-volume",
670 Command: []string{"/bin/sh","-c"},
671 Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster " + clusterName},
672 SecurityContext: &corev1.SecurityContext{
678 Volumes: []corev1.Volume{{
679 Name: "multi-cluster",
680 VolumeSource: corev1.VolumeSource{
681 HostPath: &corev1.HostPathVolumeSource{
682 Path : "/opt/kud/multi-cluster",
685 Name: "secret-volume",
686 VolumeSource: corev1.VolumeSource{
687 Secret: &corev1.SecretVolumeSource{
688 SecretName: "ssh-key-secret",
692 RestartPolicy: "Never",
696 BackoffLimit : &backOffLimit,
700 _, err := jobClient.Create(job)
702 fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
709 //Function to Check if job succeeded
// checkJob polls the "kud-<clusterName>" Job (sleeping 2s per iteration;
// the loop header is elided from this excerpt) until it succeeds or fails.
// On success it creates the per-cluster IP-address ConfigMap. Intended to
// run as a goroutine — it returns nothing and only logs errors.
710 func checkJob(clusterName, namespace string, data, labels map[string]string, clientset *kubernetes.Clientset) {
712 fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
713 jobName := "kud-" + clusterName
714 jobClient := clientset.BatchV1().Jobs(namespace)
717 time.Sleep(2 * time.Second)
719 job, err := jobClient.Get(jobName, metav1.GetOptions{})
721 fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
724 jobSucceeded := job.Status.Succeeded
725 jobFailed := job.Status.Failed
727 if jobSucceeded == 1 {
728 fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
730 //KUD was installed successfully create configmap to store IP address info for the cluster
731 err = createConfigMap(data, labels, namespace, clientset)
733 fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
740 fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
749 //Function to get software list from software CR
// getSoftwareList extracts the target cluster name (from the "cluster"
// label) and the master/worker software lists from a Software CR.
750 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
752 CRclusterName := softwareCR.GetLabels()["cluster"]
754 masterSofwareList := softwareCR.Spec.MasterSoftware
755 workerSoftwareList := softwareCR.Spec.WorkerSoftware
757 return CRclusterName, masterSofwareList, workerSoftwareList
760 //Function to install software in clusterHosts
// softwareInstaller builds an apt-style "name" / "name=version" package
// string from a heterogeneous software list (plain strings, or maps of
// software name -> {"version": ...}) and hands it to sshInstaller for the
// given host. Case labels of the type switch are elided from this excerpt.
761 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
763 var installString string
764 for _, software := range softwareList {
766 switch t := software.(type){
768 installString += software.(string) + " "
770 softwareMap, errBool := software.(map[string]interface{})
772 fmt.Printf("Error occured, cannot install software %v\n", software)
774 for softwareName, versionMap := range softwareMap {
// NOTE(review): versionMAP["version"].(string) panics if the CR
// omits "version" or uses a non-string value.
776 versionMAP, _ := versionMap.(map[string]interface{})
777 version := versionMAP["version"].(string)
778 installString += softwareName + "=" + version + " "
781 fmt.Printf("invalid format %v\n", t)
786 err := sshInstaller(installString, sshPrivateKey, ipAddress)
794 //Function to Run Installation commands via ssh
// sshInstaller SSHes to ipAddress:22 using the private key at
// sshPrivateKey and runs an apt-get install for softwareString.
795 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
797 buffer, err := ioutil.ReadFile(sshPrivateKey)
802 key, err := ssh.ParsePrivateKey(buffer)
807 sshConfig := &ssh.ClientConfig{
809 Auth: []ssh.AuthMethod{
// NOTE(review): host key verification is disabled — acceptable only on a
// trusted provisioning network; consider ssh.FixedHostKey in production.
813 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
816 client, err := ssh.Dial("tcp", ipAddress + ":22", sshConfig)
821 session, err := client.NewSession()
826 defer session.Close()
829 cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
// NOTE(review): session.Start does not wait for the remote command to
// finish; unless a session.Wait() exists in the elided lines, the install
// outcome is never observed.
830 err = session.Start(cmd)
// listVirtletVMs lists all pods in all namespaces and returns, for each
// Running pod whose "kubernetes.io/target-runtime" annotation is
// "virtlet.cloud", a VirtletVM pairing the pod IP with the MAC of its
// default network (read from the "v1.multus-cni.io/default-network"
// annotation).
840 func listVirtletVMs() ([]VirtletVM, error) {
842 var vmPodList []VirtletVM
844 config, err := config.GetConfig()
// NOTE(review): fmt.Println does not interpret %v — these messages should
// use fmt.Printf.
846 fmt.Println("Could not get kube config, Error: %v\n", err)
847 return []VirtletVM{}, err
850 // create the clientset
851 clientset, err := kubernetes.NewForConfig(config)
853 fmt.Println("Could not create the client set, Error: %v\n", err)
854 return []VirtletVM{}, err
857 pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
859 fmt.Println("Could not get pod info, Error: %v\n", err)
860 return []VirtletVM{}, err
863 for _, pod := range pods.Items {
864 var podAnnotation map[string]interface{}
865 var podStatus corev1.PodStatus
866 var podDefaultNetStatus []NetworksStatus
// Round-trips the annotation map through JSON to get generic
// map[string]interface{} access.
868 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
870 fmt.Println("Could not get pod annotations, Error: %v\n", err)
871 return []VirtletVM{}, err
874 json.Unmarshal([]byte(annotation), &podAnnotation)
875 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
876 runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
878 podStatusJson, _ := json.Marshal(pod.Status)
879 json.Unmarshal([]byte(podStatusJson), &podStatus)
881 if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
882 ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
883 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
// NOTE(review): podDefaultNetStatus[0] is indexed without a length check;
// a malformed/empty annotation would panic here (Unmarshal error is also
// ignored).
885 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
890 return vmPodList, nil
893 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
895 for i := 0; i < len(vmList); i++ {
896 if vmList[i].MACaddress == macAddress {
897 return vmList[i].IPaddress, nil