13 bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
14 batchv1 "k8s.io/api/batch/v1"
15 corev1 "k8s.io/api/core/v1"
16 "k8s.io/apimachinery/pkg/api/errors"
17 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
18 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
19 "k8s.io/apimachinery/pkg/runtime"
20 "k8s.io/apimachinery/pkg/runtime/schema"
21 "k8s.io/client-go/dynamic"
23 "golang.org/x/crypto/ssh"
25 "k8s.io/client-go/kubernetes"
26 "sigs.k8s.io/controller-runtime/pkg/client"
27 "sigs.k8s.io/controller-runtime/pkg/client/config"
28 "sigs.k8s.io/controller-runtime/pkg/controller"
29 "sigs.k8s.io/controller-runtime/pkg/handler"
30 "sigs.k8s.io/controller-runtime/pkg/manager"
31 "sigs.k8s.io/controller-runtime/pkg/reconcile"
32 logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
33 "sigs.k8s.io/controller-runtime/pkg/source"
// VirtletVM associates a Virtlet VM pod's IP address with its MAC address.
// (Field declarations are elided in this view; listVirtletVMs constructs the
// value as {podStatus.PodIP, podDefaultNetStatus[0].Mac} and getVMIPaddress
// reads .IPaddress / .MACaddress — TODO confirm field names against full file.)
36 type VirtletVM struct {
// NetworksStatus mirrors one entry of the JSON array carried in the
// "k8s.v1.cni.cncf.io/networks-status" pod annotation (written by Multus/CNI).
// Only Mac is consumed in this file (see listVirtletVMs).
41 type NetworksStatus struct {
42 Name string `json:"name,omitempty"`
43 Interface string `json:"interface,omitempty"`
44 Ips []string `json:"ips,omitempty"`
45 Mac string `json:"mac,omitempty"`
46 Default bool `json:"default,omitempty"`
47 Dns interface{} `json:"dns,omitempty"`
// Package-level structured logger for this controller.
50 var log = logf.Log.WithName("controller_provisioning")
53 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
54 * business logic. Delete these comments after modifying this file.*
57 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
58 // and Start it when the Manager is Started.
// Entry point used by the operator's controller registration; wires the
// default reconciler (newReconciler) into the manager via add.
59 func Add(mgr manager.Manager) error {
60 return add(mgr, newReconciler(mgr))
63 // newReconciler returns a new reconcile.Reconciler
// Builds the rest config, a typed clientset, and a dynamic client (used for
// the BareMetalHost CRD) and bundles them into a ReconcileProvisioning.
// NOTE(review): each error below is only printed; construction continues, so
// on failure the returned reconciler may hold nil clients — the elided lines
// may handle this, TODO confirm. Prefer returning the error to the caller.
64 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
66 config, err := config.GetConfig()
68 fmt.Printf("Could not get kube config, Error: %v\n", err)
71 clientSet, err := kubernetes.NewForConfig(config)
73 fmt.Printf("Could not create clientset, Error: %v\n", err)
75 bmhDynamicClient, err := dynamic.NewForConfig(config)
78 fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
81 return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
84 // add adds a new Controller to mgr with r as the reconcile.Reconciler
// Registers four watches: the Provisioning CR itself, ConfigMaps and Jobs
// owned by a Provisioning CR, and the Software CR.
// NOTE(review): this view elides the `if err != nil { return err }` guards
// between watches and the IsController field of EnqueueRequestForOwner —
// confirm against the full file.
85 func add(mgr manager.Manager, r reconcile.Reconciler) error {
86 // Create a new controller
87 c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
92 // Watch for changes to primary resource Provisioning
93 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
98 // Watch for changes to resource configmap created as a consequence of the provisioning CR
99 err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
101 OwnerType: &bpav1alpha1.Provisioning{},
108 //Watch for changes to job resource also created as a consequence of the provisioning CR
109 err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
111 OwnerType: &bpav1alpha1.Provisioning{},
118 // Watch for changes to resource software CR
119 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
127 // blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
128 var _ reconcile.Reconciler = &ReconcileProvisioning{}
130 // ReconcileProvisioning reconciles a Provisioning object
131 type ReconcileProvisioning struct {
132 // This client, initialized using mgr.Client() above, is a split client
133 // that reads objects from the cache and writes to the apiserver
// NOTE(review): the `client client.Client` field declaration is elided in
// this view (Reconcile uses r.client) — confirm it exists in the full file.
// scheme is the manager's runtime scheme; clientset is the typed client used
// for ConfigMaps/Jobs/Pods; bmhClient is the dynamic client for BareMetalHosts.
135 scheme *runtime.Scheme
136 clientset kubernetes.Interface
137 bmhClient dynamic.Interface
140 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
141 // and what is in the Provisioning.Spec
142 // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
143 // a Pod as an example
145 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
146 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
// Two flows share this handler: if the request names a Provisioning CR, an
// ansible hosts.ini inventory is generated and a KUD installer Job launched;
// otherwise it is treated as a Software CR and packages are installed over SSH.
// NOTE(review): this listing elides many lines (blank lines, closing braces,
// `if err != nil {` guards, and some variable declarations such as allString
// and hostIPaddress); comments below describe only the visible statements.
147 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
148 reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
150 reqLogger.Info("Reconciling Custom Resource")
152 // Fetch the Provisioning instance
153 provisioningInstance := &bpav1alpha1.Provisioning{}
154 softwareInstance := &bpav1alpha1.Software{}
// Try the request first as a Provisioning CR; the elided lines presumably
// branch on this err before falling back to the Software lookup below.
155 err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
156 provisioningCreated := true
159 //Check if its a Software Instance
160 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
162 if errors.IsNotFound(err) {
163 // Request object not found, could have been deleted after reconcile request.
164 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
165 // Return and don't requeue
166 return reconcile.Result{}, nil
169 // Error reading the object - requeue the request.
170 return reconcile.Result{}, err
173 //No error occured and so a Software CR was created not a Provisoning CR
174 provisioningCreated = false
// Key prefixes used in the per-cluster IP-address ConfigMap to distinguish
// master entries from worker entries.
177 masterTag := "MASTER_"
178 workerTag := "WORKER_"
180 if provisioningCreated {
182 ///////////////////////////////////////////////////////////////////////////////////////////////
183 //////////////// Provisioning CR was created so install KUD /////////////////
184 //////////////////////////////////////////////////////////////////////////////////////////////
185 clusterName := provisioningInstance.Labels["cluster"]
186 clusterType := provisioningInstance.Labels["cluster-type"]
187 mastersList := provisioningInstance.Spec.Masters
188 workersList := provisioningInstance.Spec.Workers
189 kudPlugins := provisioningInstance.Spec.KUDPlugins
190 podSubnet := provisioningInstance.Spec.PodSubnet
// NOTE(review): both list errors are silently discarded here; a failed
// BareMetalHost/VM listing degrades into "host not found" errors below.
192 bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
193 virtletVMList, _ := listVirtletVMs(r.clientset)
196 var masterString string
197 var workerString string
199 multiClusterDir := "/multi-cluster"
201 //Create Directory for the specific cluster
202 clusterDir := multiClusterDir + "/" + clusterName
// NOTE(review): MkdirAll error ignored; a later os.Create would fail anyway,
// but an explicit check would give a clearer message.
203 os.MkdirAll(clusterDir, os.ModePerm)
205 //Create Maps to be used for cluster ip address to label configmap
206 clusterLabel := make(map[string]string)
207 clusterLabel["cluster"] = clusterName
208 clusterData := make(map[string]string)
210 //Iterate through mastersList and get all the mac addresses and IP addresses
211 for _, masterMap := range mastersList {
213 for masterLabel, master := range masterMap {
214 masterMAC := master.MACaddress
// Missing MAC on a master is fatal for this reconcile (guard line elided).
218 err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
219 return reconcile.Result{}, err
222 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
224 //Check 'cluster-type' label for Virtlet VMs
225 if clusterType == "virtlet-vm" {
226 //Get VM IP address of master
227 hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
228 if err != nil || hostIPaddress == "" {
229 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
230 return reconcile.Result{}, err
237 if clusterType != "virtlet-vm" {
238 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
240 //Get IP address of master
241 hostIPaddress, err = getHostIPaddress(bareMetalHostList, masterMAC)
242 if err != nil || hostIPaddress == "" {
243 err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
244 return reconcile.Result{}, err
// Append this master to the ansible [all] section (bare-metal form).
246 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
249 if clusterType == "virtlet-vm" {
// NOTE(review): hard-coded root/root SSH credentials for Virtlet VMs — a
// known security smell; should come from a Secret.
250 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
252 masterString += masterLabel + "\n"
253 clusterData[masterTag+masterLabel] = hostIPaddress
255 fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
257 if len(workersList) != 0 {
259 //Iterate through workersList and get all the mac addresses
260 for _, workerMap := range workersList {
262 //Get worker labels from the workermap
263 for workerLabel, worker := range workerMap {
265 //Check if workerString already contains worker label
// NOTE(review): substring containment is a weak membership test — label
// "node1" matches inside "node10"; an exact-match set would be safer.
266 containsWorkerLabel := strings.Contains(workerString, workerLabel)
267 workerMAC := worker.MACaddress
270 //Error occurs if the same label is given to different hosts (assumption,
271 //each MAC address represents a unique host
272 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
273 if containsWorkerLabel {
// NOTE(review): strings.ReplaceAll returns a new string and the result is
// discarded, so workerString is NOT actually scrubbed here — likely meant
// workerString = strings.ReplaceAll(...). Same issue recurs below.
274 strings.ReplaceAll(workerString, workerLabel, "")
276 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
277 different label to node with MACAddress %s`, workerLabel, workerMAC)
278 return reconcile.Result{}, err
280 //same node performs worker and master roles
281 } else if workerLabel == masterLabel && !containsWorkerLabel {
282 workerString += workerLabel + "\n"
284 //Add host to ip address config map with worker tag
285 hostIPaddress = clusterData[masterTag+masterLabel]
286 clusterData[workerTag+masterLabel] = hostIPaddress
288 //Error occurs if the same node is given different labels
289 } else if workerLabel != masterLabel && workerMAC == masterMAC {
290 if containsWorkerLabel {
// NOTE(review): discarded ReplaceAll result again — see note above.
291 strings.ReplaceAll(workerString, workerLabel, "")
293 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
294 different label to node with MACAddress %s`, workerLabel, workerMAC)
295 return reconcile.Result{}, err
297 //worker node is different from any master node and it has not been added to the worker list
298 } else if workerLabel != masterLabel && !containsWorkerLabel {
300 // Error occurs if MAC address not provided for worker node not matching master
302 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
303 return reconcile.Result{}, err
306 containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
308 if clusterType == "virtlet-vm" {
309 //Get VM IP address of master
310 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
311 if err != nil || hostIPaddress == "" {
312 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
313 return reconcile.Result{}, err
320 if clusterType != "virtlet-vm" {
321 fmt.Printf("Host %s matches that macAddress\n", bmhCR)
323 //Get IP address of worker
324 hostIPaddress, err = getHostIPaddress(bareMetalHostList, workerMAC)
// NOTE(review): this fmt.Errorf result is DISCARDED — err is not reassigned,
// so the return below propagates the stale err from getHostIPaddress (which
// may be nil when only hostIPaddress == "" failed). Likely meant err = fmt.Errorf(...).
326 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
327 return reconcile.Result{}, err
329 allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
331 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
333 if clusterType == "virtlet-vm" {
// NOTE(review): this appends masterLabel, while the bare-metal branch above
// appends workerLabel — looks like a copy/paste slip; confirm intent.
334 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
336 workerString += workerLabel + "\n"
337 clusterData[workerTag+workerLabel] = hostIPaddress
339 //No host found that matches the worker MAC
342 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
343 return reconcile.Result{}, err
348 //No worker node specified, add master as worker node
349 } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
350 workerString += masterLabel + "\n"
352 //Add host to ip address config map with worker tag
353 hostIPaddress = clusterData[masterTag+masterLabel]
354 clusterData[workerTag+masterLabel] = hostIPaddress
357 //No host matching master MAC found
359 err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
360 return reconcile.Result{}, err
365 //Create host.ini file
366 //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
367 iniHostFilePath := clusterDir + "/hosts.ini"
368 newFile, err := os.Create(iniHostFilePath)
// NOTE(review): Close is deferred BEFORE the error check; if Create failed,
// newFile is nil and the deferred Close panics. Check err first, then defer.
369 defer newFile.Close()
372 fmt.Printf("Error occured while creating file \n %v", err)
373 return reconcile.Result{}, err
// Build the KUD inventory as raw ini sections: [all] host vars, master/worker
// groups, etcd, and (bare-metal only) the OVN and virtlet groups.
376 hostFile, err := ini.Load(iniHostFilePath)
378 fmt.Printf("Error occured while Loading file \n %v", err)
379 return reconcile.Result{}, err
382 _, err = hostFile.NewRawSection("all", allString)
384 fmt.Printf("Error occured while creating section \n %v", err)
385 return reconcile.Result{}, err
387 _, err = hostFile.NewRawSection("kube-master", masterString)
389 fmt.Printf("Error occured while creating section \n %v", err)
390 return reconcile.Result{}, err
393 _, err = hostFile.NewRawSection("kube-node", workerString)
395 fmt.Printf("Error occured while creating section \n %v", err)
396 return reconcile.Result{}, err
399 _, err = hostFile.NewRawSection("etcd", masterString)
401 fmt.Printf("Error occured while creating section \n %v", err)
402 return reconcile.Result{}, err
405 if clusterType != "virtlet-vm" {
406 _, err = hostFile.NewRawSection("ovn-central", masterString)
408 fmt.Printf("Error occured while creating section \n %v", err)
409 return reconcile.Result{}, err
412 _, err = hostFile.NewRawSection("ovn-controller", workerString)
414 fmt.Printf("Error occured while creating section \n %v", err)
415 return reconcile.Result{}, err
418 _, err = hostFile.NewRawSection("virtlet", workerString)
420 fmt.Printf("Error occured while creating section \n %v", err)
421 return reconcile.Result{}, err
424 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
426 fmt.Printf("Error occured while creating section \n %v", err)
427 return reconcile.Result{}, err
430 //Create host.ini file for KUD
// NOTE(review): SaveTo's error is ignored; a failed write silently launches
// the installer Job against a stale or empty inventory.
431 hostFile.SaveTo(iniHostFilePath)
434 err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
436 fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
437 return reconcile.Result{}, err
440 //Start separate thread to keep checking job status, Create an IP address configmap
441 //for cluster if KUD is successfully installed
// NOTE(review): goroutine has no cancellation/ownership (no ctx, no WaitGroup);
// repeated reconciles of the same CR can leak pollers. Consider requeueing
// with Result{RequeueAfter: ...} instead of a detached goroutine.
442 go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
444 return reconcile.Result{}, nil
448 ///////////////////////////////////////////////////////////////////////////////////////////////
449 //////////////// Software CR was created so install software /////////////////
450 //////////////////////////////////////////////////////////////////////////////////////////////
451 softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
452 defaultSSHPrivateKey := "/root/.ssh/id_rsa"
454 //Get IP address configmap for the cluster
455 clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
457 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
458 return reconcile.Result{}, err
// Install the master/worker software set on each host recorded in the
// cluster ConfigMap, keyed by the MASTER_/WORKER_ prefix.
461 for hostLabel, ipAddress := range clusterConfigMapData {
463 if strings.Contains(hostLabel, masterTag) {
464 // Its a master node, install master software
// NOTE(review): installer errors are only printed, never returned, so a
// failed install still reports success to the workqueue.
465 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
467 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
469 } else if strings.Contains(hostLabel, workerTag) {
470 // Its a worker node, install worker software
471 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
473 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
480 return reconcile.Result{}, nil
483 //Function to Get List containing baremetal hosts
// Lists all BareMetalHost CRs via the dynamic client. Returns an empty
// UnstructuredList (never nil) plus the error on failure.
// NOTE(review): the GVR's Group/Version fields are elided in this view
// (presumably metal3.io / v1alpha1 — confirm); the list is cluster-wide.
484 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
486 //Create GVR representing a BareMetalHost CR
487 bmhGVR := schema.GroupVersionResource{
490 Resource: "baremetalhosts",
493 //Get List containing all BareMetalHosts CRs
494 bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
496 fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
497 return &unstructured.UnstructuredList{}, err
500 return bareMetalHosts, nil
503 //Function to check if BareMetalHost containing MAC address exist
// Returns (true, CR name) for the first BareMetalHost whose marshaled JSON
// contains macAddress as a raw byte substring.
// NOTE(review): substring search over the entire CR JSON can false-positive
// (the MAC could appear in any field, or as a prefix of a longer string);
// matching against status.hardware.nics[].mac explicitly would be safer.
// The return sits inside the loop — the elided lines presumably guard it
// with `if macBool {`; confirm against the full file.
504 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
506 //Convert macAddress to byte array for comparison
507 macAddressByte := []byte(macAddress)
510 for _, bareMetalHost := range bareMetalHostList.Items {
// MarshalJSON error deliberately ignored; a failed marshal yields no match.
511 bmhJson, _ := bareMetalHost.MarshalJSON()
513 macBool = bytes.Contains(bmhJson, macAddressByte)
515 return macBool, bareMetalHost.GetName()
524 //Function to get the IP address of a host from the BareMetalHost resource
// Walks each BareMetalHost's status.hardware.nics and returns the ip of the
// NIC whose mac equals macAddress. Each `ok` guard's failure branch and the
// final return statements are elided in this view; the visible type
// assertions use the comma-ok form, so malformed objects are skipped safely.
525 func getHostIPaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (string, error) {
527 for _, bareMetalHost := range bareMetalHostList.Items {
528 status, ok := bareMetalHost.Object["status"].(map[string]interface{})
532 hardware, ok := status["hardware"].(map[string]interface{})
536 nics, ok := hardware["nics"].([]interface{})
540 for _, nic := range nics {
541 n, ok := nic.(map[string]interface{})
545 ip, ok := n["ip"].(string)
// Comparing string == interface{} is valid Go; equal only when n["mac"]
// holds a string with the same value.
549 if macAddress == n["mac"] {
557 //Function to create configmap
// Creates the per-cluster IP-address ConfigMap, named "<cluster>-configmap"
// (from labels["cluster"]); data maps MASTER_/WORKER_ prefixed host labels
// to IP addresses. The Data/Labels assignments and the error return are
// elided in this view.
// NOTE(review): Create fails with AlreadyExists on re-reconcile; confirm the
// elided lines handle that (e.g. via errors.IsAlreadyExists or Update).
558 func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {
560 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
562 configmap := &corev1.ConfigMap{
564 ObjectMeta: metav1.ObjectMeta{
565 Name: labels["cluster"] + "-configmap",
571 _, err := configmapClient.Create(configmap)
580 //Function to get configmap Data
// Fetches "<clusterName>-configmap" from the given namespace and returns its
// Data map (host label -> IP address), mirroring what createConfigMap stores.
// The Get-error branch is elided in this view.
581 func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
583 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
584 configmapName := clusterName + "-configmap"
585 clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
590 configmapData := clusterConfigmap.Data
591 return configmapData, nil
594 //Function to create job for KUD installation
// Builds and submits a batch Job "kud-<clusterName>" that runs the KUD
// installer container against the generated hosts.ini, mounting the
// multi-cluster hostPath and the ssh-key-secret.
// NOTE(review): the Job is created in the hard-coded "default" namespace
// even though a `namespace` parameter exists — checkJob polls in the
// caller-supplied namespace, so unless callers always pass "default" the
// poller will never find the Job. Confirm and unify.
595 func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error {
// No retries: a failed install surfaces as a failed Job immediately.
597 var backOffLimit int32 = 0
// privi presumably feeds SecurityContext.Privileged below (assignment line
// elided) — the installer container runs privileged.
598 var privi bool = true
// Assemble the installer command line; --network and --plugins are optional.
600 installerString := " ./installer --cluster " + clusterName
601 if len(podSubnet) > 0 {
602 installerString += " --network " + podSubnet
605 // Check if any plugin was specified
606 if len(kudPlugins) > 0 {
607 plugins := " --plugins"
609 for _, plug := range kudPlugins {
610 plugins += " " + plug
613 installerString += plugins
616 jobClient := clientset.BatchV1().Jobs("default")
620 ObjectMeta: metav1.ObjectMeta{
621 Name: "kud-" + clusterName,
624 Spec: batchv1.JobSpec{
625 Template: corev1.PodTemplateSpec{
626 ObjectMeta: metav1.ObjectMeta{
630 Spec: corev1.PodSpec{
632 Containers: []corev1.Container{{
634 Image: "github.com/onap/multicloud-k8s:latest",
635 ImagePullPolicy: "IfNotPresent",
636 VolumeMounts: []corev1.VolumeMount{{
637 Name: "multi-cluster",
638 MountPath: "/opt/kud/multi-cluster",
641 Name: "secret-volume",
645 EnvFrom: []corev1.EnvFromSource{
647 ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}},
// Copy the mounted SSH key into /root/.ssh with sane permissions, then run
// the installer.
650 Command: []string{"/bin/sh", "-c"},
651 Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
652 SecurityContext: &corev1.SecurityContext{
657 Volumes: []corev1.Volume{{
658 Name: "multi-cluster",
659 VolumeSource: corev1.VolumeSource{
660 HostPath: &corev1.HostPathVolumeSource{
661 Path: "/opt/kud/multi-cluster",
664 Name: "secret-volume",
665 VolumeSource: corev1.VolumeSource{
666 Secret: &corev1.SecretVolumeSource{
667 SecretName: "ssh-key-secret",
670 RestartPolicy: "Never",
673 BackoffLimit: &backOffLimit,
676 _, err := jobClient.Create(job)
678 fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
685 //Function to Check if job succeeded
// Runs as a detached goroutine: polls Job "kud-<clusterName>" every 2s (the
// enclosing loop header is elided in this view) and, on success, persists the
// cluster's host->IP data as a ConfigMap.
// NOTE(review): no timeout/cancellation is visible — if the Job never
// terminates this goroutine may poll forever; jobFailed is read but its use
// beyond the final Printf is elided. Also see the namespace mismatch with
// createKUDinstallerJob, which creates the Job in "default".
686 func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
688 fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
689 jobName := "kud-" + clusterName
690 jobClient := clientset.BatchV1().Jobs(namespace)
693 time.Sleep(2 * time.Second)
695 job, err := jobClient.Get(jobName, metav1.GetOptions{})
697 fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
700 jobSucceeded := job.Status.Succeeded
701 jobFailed := job.Status.Failed
703 if jobSucceeded == 1 {
704 fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
706 //KUD was installed successfully create configmap to store IP address info for the cluster
707 err = createConfigMap(data, labels, namespace, clientset)
709 fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
716 fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
725 //Function to get software list from software CR
// Extracts the target cluster name (from the "cluster" label) and the
// master/worker software lists from a Software CR.
// NOTE(review): local name `masterSofwareList` has a typo (Sofware) — worth
// renaming in a code change.
726 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
728 CRclusterName := softwareCR.GetLabels()["cluster"]
730 masterSofwareList := softwareCR.Spec.MasterSoftware
731 workerSoftwareList := softwareCR.Spec.WorkerSoftware
733 return CRclusterName, masterSofwareList, workerSoftwareList
736 //Function to install software in clusterHosts
// Flattens softwareList into a single apt-get package string — plain strings
// become "name ", maps become "name=version " — then installs everything on
// the host at ipAddress via sshInstaller. The switch's case labels are elided
// in this view (visibly a string case, a map case, and a default).
737 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
739 var installString string
740 for _, software := range softwareList {
742 switch t := software.(type) {
744 installString += software.(string) + " "
// errBool is the comma-ok flag of the assertion; the failure branch (guard
// line elided) only prints and presumably skips the entry.
746 softwareMap, errBool := software.(map[string]interface{})
748 fmt.Printf("Error occured, cannot install software %v\n", software)
750 for softwareName, versionMap := range softwareMap {
752 versionMAP, _ := versionMap.(map[string]interface{})
// NOTE(review): unchecked assertion — if "version" is absent or not a
// string this panics; versionMAP is also nil (panic on index) when the
// ignored comma-ok above failed. Use the comma-ok form for both.
753 version := versionMAP["version"].(string)
754 installString += softwareName + "=" + version + " "
757 fmt.Printf("invalid format %v\n", t)
762 err := sshInstaller(installString, sshPrivateKey, ipAddress)
770 //Function to Run Installation commands via ssh
// Opens an SSH session as root to ipAddress:22 using the private key at
// sshPrivateKey and runs apt-get to install softwareString. Error-return
// branches, the User field, and any session.Wait()/client.Close() are elided
// in this view — confirm the command's completion is actually awaited,
// since Start alone does not wait.
771 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
773 buffer, err := ioutil.ReadFile(sshPrivateKey)
778 key, err := ssh.ParsePrivateKey(buffer)
783 sshConfig := &ssh.ClientConfig{
785 Auth: []ssh.AuthMethod{
// NOTE(review): InsecureIgnoreHostKey disables host-key verification —
// vulnerable to MITM; use ssh.FixedHostKey or a known_hosts callback.
789 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
792 client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
797 session, err := client.NewSession()
802 defer session.Close()
// NOTE(review): only the first command is under sudo; "apt-get install" after
// "&&" runs unprivileged unless the login user is root. softwareString ends
// with a space, so the trailing "-y" concatenates correctly.
805 cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
806 err = session.Start(cmd)
// listVirtletVMs scans all pods in every namespace and returns the IP/MAC
// pairs of running Virtlet VMs: pods annotated with
// kubernetes.io/target-runtime=virtlet.cloud in phase Running that carry a
// networks-status annotation.
816 func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
818 var vmPodList []VirtletVM
820 pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
822 fmt.Printf("Could not get pod info, Error: %v\n", err)
823 return []VirtletVM{}, err
826 for _, pod := range pods.Items {
827 var podAnnotation map[string]interface{}
828 var podStatus corev1.PodStatus
829 var podDefaultNetStatus []NetworksStatus
// Round-trip the annotation map through JSON to get map[string]interface{}.
831 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
833 fmt.Printf("Could not get pod annotations, Error: %v\n", err)
834 return []VirtletVM{}, err
// Unmarshal errors below are deliberately ignored; nil/empty results are
// filtered by the guards that follow.
837 json.Unmarshal([]byte(annotation), &podAnnotation)
838 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
// Local `runtime` shadows the imported k8s runtime package inside this block.
839 runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
841 podStatusJson, _ := json.Marshal(pod.Status)
842 json.Unmarshal([]byte(podStatusJson), &podStatus)
844 if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
845 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
846 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
// NOTE(review): if the networks-status JSON is malformed or an empty array,
// podDefaultNetStatus is empty and [0] panics — guard with a length check.
848 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
856 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
858 for i := 0; i < len(vmList); i++ {
859 if vmList[i].MACaddress == macAddress {
860 return vmList[i].IPaddress, nil