14 bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
15 batchv1 "k8s.io/api/batch/v1"
16 corev1 "k8s.io/api/core/v1"
17 "k8s.io/apimachinery/pkg/api/errors"
18 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
19 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
20 "k8s.io/apimachinery/pkg/runtime"
21 "k8s.io/apimachinery/pkg/runtime/schema"
22 "k8s.io/client-go/dynamic"
24 "golang.org/x/crypto/ssh"
26 "k8s.io/client-go/kubernetes"
27 "sigs.k8s.io/controller-runtime/pkg/client"
28 "sigs.k8s.io/controller-runtime/pkg/client/config"
29 "sigs.k8s.io/controller-runtime/pkg/controller"
30 "sigs.k8s.io/controller-runtime/pkg/handler"
31 "sigs.k8s.io/controller-runtime/pkg/manager"
32 "sigs.k8s.io/controller-runtime/pkg/reconcile"
33 logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
34 "sigs.k8s.io/controller-runtime/pkg/source"
37 type VirtletVM struct {
// NetworksStatus mirrors one entry of the Multus
// "k8s.v1.cni.cncf.io/networks-status" pod annotation; listVirtletVMs
// unmarshals that annotation into a []NetworksStatus to read the VM MAC.
42 type NetworksStatus struct {
43 Name string `json:"name,omitempty"`
44 Interface string `json:"interface,omitempty"`
45 Ips []string `json:"ips,omitempty"`
46 Mac string `json:"mac,omitempty"`
47 Default bool `json:"default,omitempty"`
// Dns is kept opaque (interface{}) because its shape is not needed here.
48 Dns interface{} `json:"dns,omitempty"`
// log is the package-level structured logger for this controller.
51 var log = logf.Log.WithName("controller_provisioning")
54 * USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller
55 * business logic. Delete these comments after modifying this file.*
58 // Add creates a new Provisioning Controller and adds it to the Manager. The Manager will set fields on the Controller
59 // and Start it when the Manager is Started.
// Called by the operator's controller registration machinery; delegates
// reconciler construction to newReconciler and wiring to add.
60 func Add(mgr manager.Manager) error {
61 return add(mgr, newReconciler(mgr))
64 // newReconciler returns a new reconcile.Reconciler
// It builds three clients: the manager's cached split client, a typed
// clientset, and a dynamic client used for BareMetalHost CRs.
// NOTE(review): on each error below only a message is printed; the error
// handling lines are not visible in this chunk, and the function appears to
// continue and return a reconciler that may hold nil clients — confirm.
65 func newReconciler(mgr manager.Manager) reconcile.Reconciler {
67 config, err := config.GetConfig()
69 fmt.Printf("Could not get kube config, Error: %v\n", err)
72 clientSet, err := kubernetes.NewForConfig(config)
74 fmt.Printf("Could not create clientset, Error: %v\n", err)
76 bmhDynamicClient, err := dynamic.NewForConfig(config)
79 fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
82 return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient}
85 // add adds a new Controller to mgr with r as the reconcile.Reconciler
// Watches: the Provisioning CR itself, ConfigMaps and Jobs owned by a
// Provisioning CR, and the Software CR (enqueued by its own name).
86 func add(mgr manager.Manager, r reconcile.Reconciler) error {
87 // Create a new controller
88 c, err := controller.New("provisioning-controller", mgr, controller.Options{Reconciler: r})
93 // Watch for changes to primary resource Provisioning
94 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Provisioning{}}, &handler.EnqueueRequestForObject{})
99 // Watch for changes to resource configmap created as a consequence of the provisioning CR
100 err = c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{
102 OwnerType: &bpav1alpha1.Provisioning{},
109 //Watch for changes to job resource also created as a consequence of the provisioning CR
110 err = c.Watch(&source.Kind{Type: &batchv1.Job{}}, &handler.EnqueueRequestForOwner{
112 OwnerType: &bpav1alpha1.Provisioning{},
119 // Watch for changes to resource software CR
// NOTE(review): Software events enqueue requests handled by the same
// Reconcile below, which distinguishes the two CR kinds by Get lookups.
120 err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
128 // blank assignment to verify that ReconcileProvisioning implements reconcile.Reconciler
// Compile-time interface satisfaction check; has no runtime effect.
129 var _ reconcile.Reconciler = &ReconcileProvisioning{}
131 // ReconcileProvisioning reconciles a Provisioning object
132 type ReconcileProvisioning struct {
133 // This client, initialized using mgr.Client() above, is a split client
134 // that reads objects from the cache and writes to the apiserver
// (the `client` field declaration itself is not visible in this chunk)
// scheme maps Go types to GroupVersionKinds for owned objects.
136 scheme *runtime.Scheme
// clientset is the typed Kubernetes client (ConfigMaps, Jobs, Pods).
137 clientset kubernetes.Interface
// bmhClient is a dynamic client used to list BareMetalHost CRs.
138 bmhClient dynamic.Interface
141 // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
142 // and what is in the Provisioning.Spec
143 // TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates
144 // a Pod as an example
146 // The Controller will requeue the Request to be processed again if the returned error is non-nil or
147 // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
//
// Two CR kinds funnel through this one reconciler: a Provisioning CR
// (build an Ansible hosts.ini from master/worker MACs, then launch a KUD
// installer Job) or a Software CR (SSH into already-provisioned hosts and
// apt-install packages). The kind is decided by which Get succeeds.
148 func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
149 reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
151 reqLogger.Info("Reconciling Custom Resource")
153 // Fetch the Provisioning instance
154 provisioningInstance := &bpav1alpha1.Provisioning{}
155 softwareInstance := &bpav1alpha1.Software{}
156 err := r.client.Get(context.TODO(), request.NamespacedName, provisioningInstance)
157 provisioningCreated := true
160 //Check if its a Software Instance
161 err = r.client.Get(context.TODO(), request.NamespacedName, softwareInstance)
163 if errors.IsNotFound(err) {
164 // Request object not found, could have been deleted after reconcile request.
165 // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
166 // Return and don't requeue
167 return reconcile.Result{}, nil
170 // Error reading the object - requeue the request.
171 return reconcile.Result{}, err
174 //No error occured and so a Software CR was created not a Provisoning CR
175 provisioningCreated = false
// Prefixes used as keys in the per-cluster IP-address ConfigMap.
178 masterTag := "MASTER_"
179 workerTag := "WORKER_"
181 if provisioningCreated {
183 ///////////////////////////////////////////////////////////////////////////////////////////////
184 //////////////// Provisioning CR was created so install KUD /////////////////
185 //////////////////////////////////////////////////////////////////////////////////////////////
186 clusterName := provisioningInstance.Labels["cluster"]
187 clusterType := provisioningInstance.Labels["cluster-type"]
188 mastersList := provisioningInstance.Spec.Masters
189 workersList := provisioningInstance.Spec.Workers
190 kudPlugins := provisioningInstance.Spec.KUDPlugins
191 podSubnet := provisioningInstance.Spec.PodSubnet
// NOTE(review): both list errors are discarded; an empty list later just
// surfaces as "host not found". Consider propagating these errors.
193 bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
194 virtletVMList, _ := listVirtletVMs(r.clientset)
197 var masterString string
198 var workerString string
200 dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
201 multiClusterDir := "/multi-cluster"
203 //Create Directory for the specific cluster
204 clusterDir := multiClusterDir + "/" + clusterName
// NOTE(review): MkdirAll error ignored; a failure shows up later as the
// os.Create error for hosts.ini.
205 os.MkdirAll(clusterDir, os.ModePerm)
207 //Create Maps to be used for cluster ip address to label configmap
208 clusterLabel := make(map[string]string)
209 clusterLabel["cluster"] = clusterName
210 clusterData := make(map[string]string)
212 //Iterate through mastersList and get all the mac addresses and IP addresses
213 for _, masterMap := range mastersList {
215 for masterLabel, master := range masterMap {
216 masterMAC := master.MACaddress
// A master without a MAC address is a hard configuration error.
220 err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
221 return reconcile.Result{}, err
224 containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
226 //Check 'cluster-type' label for Virtlet VMs
227 if clusterType == "virtlet-vm" {
228 //Get VM IP address of master
229 hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
230 if err != nil || hostIPaddress == "" {
231 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
232 return reconcile.Result{}, err
// Bare-metal path: resolve the master IP from the DHCP lease file.
239 if clusterType != "virtlet-vm" {
240 fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
242 //Get IP address of master
243 hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile)
244 if err != nil || hostIPaddress == "" {
245 err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
246 return reconcile.Result{}, err
248 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
// Virtlet VMs get root/root SSH credentials appended to the inventory.
251 if clusterType == "virtlet-vm" {
252 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
254 masterString += masterLabel + "\n"
255 clusterData[masterTag+masterLabel] = hostIPaddress
257 fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
259 if len(workersList) != 0 {
261 //Iterate through workersList and get all the mac addresses
262 for _, workerMap := range workersList {
264 //Get worker labels from the workermap
265 for workerLabel, worker := range workerMap {
267 //Check if workerString already contains worker label
268 containsWorkerLabel := strings.Contains(workerString, workerLabel)
269 workerMAC := worker.MACaddress
272 //Error occurs if the same label is given to different hosts (assumption,
273 //each MAC address represents a unique host
274 if workerLabel == masterLabel && workerMAC != masterMAC && workerMAC != "" {
275 if containsWorkerLabel {
// BUG(review): strings.ReplaceAll returns a new string; its result is
// discarded here, so workerString is NOT cleaned before erroring out.
// Should be: workerString = strings.ReplaceAll(workerString, workerLabel, "")
276 strings.ReplaceAll(workerString, workerLabel, "")
278 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
279 different label to node with MACAddress %s`, workerLabel, workerMAC)
280 return reconcile.Result{}, err
282 //same node performs worker and master roles
283 } else if workerLabel == masterLabel && !containsWorkerLabel {
284 workerString += workerLabel + "\n"
286 //Add host to ip address config map with worker tag
287 hostIPaddress = clusterData[masterTag+masterLabel]
288 clusterData[workerTag+masterLabel] = hostIPaddress
290 //Error occurs if the same node is given different labels
291 } else if workerLabel != masterLabel && workerMAC == masterMAC {
292 if containsWorkerLabel {
// BUG(review): same discarded ReplaceAll result as above.
293 strings.ReplaceAll(workerString, workerLabel, "")
295 err = fmt.Errorf(`A node with label %s already exists, modify resource and assign a
296 different label to node with MACAddress %s`, workerLabel, workerMAC)
297 return reconcile.Result{}, err
299 //worker node is different from any master node and it has not been added to the worker list
300 } else if workerLabel != masterLabel && !containsWorkerLabel {
302 // Error occurs if MAC address not provided for worker node not matching master
304 err = fmt.Errorf("MAC address for worker %s not provided", workerLabel)
305 return reconcile.Result{}, err
308 containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
310 if clusterType == "virtlet-vm" {
311 //Get VM IP address of master
312 hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
313 if err != nil || hostIPaddress == "" {
314 err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
315 return reconcile.Result{}, err
322 if clusterType != "virtlet-vm" {
323 fmt.Printf("Host %s matches that macAddress\n", bmhCR)
325 //Get IP address of worker
326 hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile)
// BUG(review): fmt.Errorf result is discarded (compare the master path,
// which assigns it to err); the following return may send back a stale or
// nil err. Should be: err = fmt.Errorf(...)
328 fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
329 return reconcile.Result{}, err
331 allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
333 fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
335 if clusterType == "virtlet-vm" {
// NOTE(review): this appends masterLabel, while the bare-metal branch at
// line 331 appends workerLabel — looks copy-pasted from the master loop;
// confirm whether workerLabel was intended here.
336 allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
338 workerString += workerLabel + "\n"
339 clusterData[workerTag+workerLabel] = hostIPaddress
341 //No host found that matches the worker MAC
344 err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
345 return reconcile.Result{}, err
350 //No worker node specified, add master as worker node
351 } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
352 workerString += masterLabel + "\n"
354 //Add host to ip address config map with worker tag
355 hostIPaddress = clusterData[masterTag+masterLabel]
356 clusterData[workerTag+masterLabel] = hostIPaddress
359 //No host matching master MAC found
361 err = fmt.Errorf("Host with MAC Address %s not found\n", masterMAC)
362 return reconcile.Result{}, err
367 //Create host.ini file
368 //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
369 iniHostFilePath := clusterDir + "/hosts.ini"
370 newFile, err := os.Create(iniHostFilePath)
// NOTE(review): defer placed before the error check; if Create failed,
// newFile is nil. (*os.File).Close on nil returns an error rather than
// panicking, but the conventional order is: check err, then defer Close.
371 defer newFile.Close()
374 fmt.Printf("Error occured while creating file \n %v", err)
375 return reconcile.Result{}, err
// Re-open the (empty) file through the ini library, then build the KUD
// inventory sections from the accumulated host strings.
378 hostFile, err := ini.Load(iniHostFilePath)
380 fmt.Printf("Error occured while Loading file \n %v", err)
381 return reconcile.Result{}, err
384 _, err = hostFile.NewRawSection("all", allString)
386 fmt.Printf("Error occured while creating section \n %v", err)
387 return reconcile.Result{}, err
389 _, err = hostFile.NewRawSection("kube-master", masterString)
391 fmt.Printf("Error occured while creating section \n %v", err)
392 return reconcile.Result{}, err
395 _, err = hostFile.NewRawSection("kube-node", workerString)
397 fmt.Printf("Error occured while creating section \n %v", err)
398 return reconcile.Result{}, err
401 _, err = hostFile.NewRawSection("etcd", masterString)
403 fmt.Printf("Error occured while creating section \n %v", err)
404 return reconcile.Result{}, err
// OVN and virtlet sections only apply to bare-metal clusters.
407 if clusterType != "virtlet-vm" {
408 _, err = hostFile.NewRawSection("ovn-central", masterString)
410 fmt.Printf("Error occured while creating section \n %v", err)
411 return reconcile.Result{}, err
414 _, err = hostFile.NewRawSection("ovn-controller", workerString)
416 fmt.Printf("Error occured while creating section \n %v", err)
417 return reconcile.Result{}, err
420 _, err = hostFile.NewRawSection("virtlet", workerString)
422 fmt.Printf("Error occured while creating section \n %v", err)
423 return reconcile.Result{}, err
426 _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
428 fmt.Printf("Error occured while creating section \n %v", err)
429 return reconcile.Result{}, err
432 //Create host.ini file for KUD
// NOTE(review): SaveTo error is ignored; a write failure would silently
// launch the installer Job against a stale/empty inventory.
433 hostFile.SaveTo(iniHostFilePath)
436 err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
438 fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
439 return reconcile.Result{}, err
442 //Start separate thread to keep checking job status, Create an IP address configmap
443 //for cluster if KUD is successfully installed
// NOTE(review): fire-and-forget goroutine with no cancellation; it polls
// until the Job reaches a terminal state (see checkJob).
444 go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
446 return reconcile.Result{}, nil
450 ///////////////////////////////////////////////////////////////////////////////////////////////
451 //////////////// Software CR was created so install software /////////////////
452 //////////////////////////////////////////////////////////////////////////////////////////////
453 softwareClusterName, masterSoftwareList, workerSoftwareList := getSoftwareList(softwareInstance)
454 defaultSSHPrivateKey := "/root/.ssh/id_rsa"
456 //Get IP address configmap for the cluster
457 clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
459 fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
460 return reconcile.Result{}, err
// Install per-role package lists over SSH; install failures are only
// logged, so one bad host does not abort the remaining hosts.
463 for hostLabel, ipAddress := range clusterConfigMapData {
465 if strings.Contains(hostLabel, masterTag) {
466 // Its a master node, install master software
467 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, masterSoftwareList)
469 fmt.Printf("Error occured while installing master software in host %s, ERROR: %v\n", hostLabel, err)
471 } else if strings.Contains(hostLabel, workerTag) {
472 // Its a worker node, install worker software
473 err = softwareInstaller(ipAddress, defaultSSHPrivateKey, workerSoftwareList)
475 fmt.Printf("Error occured while installing worker software in host %s, ERROR: %v\n", hostLabel, err)
482 return reconcile.Result{}, nil
485 //Function to Get List containing baremetal hosts
// Lists all BareMetalHost CRs via the dynamic client (the Group/Version
// fields of the GVR are not visible in this chunk). Returns an empty list
// plus the error on failure, so callers can range over the result safely.
486 func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
488 //Create GVR representing a BareMetalHost CR
489 bmhGVR := schema.GroupVersionResource{
492 Resource: "baremetalhosts",
495 //Get List containing all BareMetalHosts CRs
496 bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
498 fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
499 return &unstructured.UnstructuredList{}, err
502 return bareMetalHosts, nil
505 //Function to check if BareMetalHost containing MAC address exist
// Returns (true, CR name) for the first BareMetalHost whose marshaled JSON
// contains macAddress as a raw substring, else (false, "").
// NOTE(review): bytes.Contains over the whole JSON document can match the
// MAC anywhere (annotations, status text), not just the NIC field — a
// structured lookup into spec would be stricter.
506 func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddress string) (bool, string) {
508 //Convert macAddress to byte array for comparison
509 macAddressByte := []byte(macAddress)
512 for _, bareMetalHost := range bareMetalHostList.Items {
// MarshalJSON error ignored; an empty bmhJson simply fails the Contains.
513 bmhJson, _ := bareMetalHost.MarshalJSON()
515 macBool = bytes.Contains(bmhJson, macAddressByte)
517 return macBool, bareMetalHost.GetName()
526 //Function to get the IP address of a host from the DHCP file
// Parses an ISC dhcpd lease file: extracts "lease <ip> {", "ethernet <mac>"
// and "binding state <state>" lines, flattens them to a token list, then
// scans backwards so the most recent lease for the MAC wins. Only a lease
// whose binding state is "active" yields an IP.
527 func getHostIPaddress(macAddress string, dhcpLeaseFilePath string) (string, error) {
529 //Read the dhcp lease file
530 dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
532 fmt.Printf("Failed to read lease file\n")
536 dhcpLeases := string(dhcpFile)
538 //Regex to use to search dhcpLeases
539 reg := "lease.*{|ethernet.*|\n. binding state.*"
540 re, err := regexp.Compile(reg)
542 fmt.Printf("Could not create Regexp object, Error %v occured\n", err)
546 //Get String containing leased Ip addresses and Corressponding MAC addresses
547 out := re.FindAllString(dhcpLeases, -1)
548 outString := strings.Join(out, " ")
549 stringReplacer := strings.NewReplacer("lease", "", "ethernet ", "", ";", "",
550 " binding state", "", "{", "")
551 replaced := stringReplacer.Replace(outString)
552 ipMacList := strings.Fields(replaced)
554 //Get IP addresses corresponding to Input MAC Address
// Reverse scan: tokens are laid out as ... <ip> <state> <mac> ..., so the
// state precedes the MAC by one index and the IP by two — relies on that
// fixed token ordering from the replacer above.
555 for idx := len(ipMacList) - 1; idx >= 0; idx-- {
556 item := ipMacList[idx]
557 if item == macAddress {
559 leaseState := ipMacList[idx-1]
560 if leaseState != "active" {
561 err := fmt.Errorf("No active ip address lease found for MAC address %s \n", macAddress)
562 fmt.Printf("%v\n", err)
565 ipAdd := ipMacList[idx-2]
573 //Function to create configmap
// Creates a ConfigMap named "<cluster>-configmap" in the given namespace,
// carrying the MASTER_/WORKER_-tagged label→IP entries as Data and the
// cluster label for later lookup.
574 func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error {
576 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
578 configmap := &corev1.ConfigMap{
580 ObjectMeta: metav1.ObjectMeta{
581 Name: labels["cluster"] + "-configmap",
587 _, err := configmapClient.Create(configmap)
596 //Function to get configmap Data
// Fetches the "<clusterName>-configmap" created by createConfigMap and
// returns its Data map (host label → IP address).
597 func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
599 configmapClient := clientset.CoreV1().ConfigMaps(namespace)
600 configmapName := clusterName + "-configmap"
601 clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
606 configmapData := clusterConfigmap.Data
607 return configmapData, nil
610 //Function to create job for KUD installation
// Builds and submits a batch Job "kud-<clusterName>" that runs the KUD
// installer container against the generated hosts.ini, mounting the
// multi-cluster hostPath and an SSH key secret. BackoffLimit 0: no retries.
611 func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error {
613 var backOffLimit int32 = 0
614 var privi bool = true
615 installerString := " ./installer --cluster " + clusterName
617 if len(podSubnet) > 0 {
618 installerString += " --network " + podSubnet
621 // Check if any plugin was specified
622 if len(kudPlugins) > 0 {
623 plugins := " --plugins"
625 for _, plug := range kudPlugins {
626 plugins += " " + plug
629 installerString += plugins
// BUG(review): the Job is always created in "default", ignoring the
// namespace parameter the caller passes (request.Namespace) — and checkJob
// later polls the Job in that caller namespace. Should be Jobs(namespace).
632 jobClient := clientset.BatchV1().Jobs("default")
636 ObjectMeta: metav1.ObjectMeta{
637 Name: "kud-" + clusterName,
640 Spec: batchv1.JobSpec{
641 Template: corev1.PodTemplateSpec{
642 ObjectMeta: metav1.ObjectMeta{
646 Spec: corev1.PodSpec{
648 Containers: []corev1.Container{{
650 Image: "github.com/onap/multicloud-k8s:latest",
651 ImagePullPolicy: "IfNotPresent",
652 VolumeMounts: []corev1.VolumeMount{{
653 Name: "multi-cluster",
654 MountPath: "/opt/kud/multi-cluster",
657 Name: "secret-volume",
661 EnvFrom: []corev1.EnvFromSource{
663 ConfigMapRef: &corev1.ConfigMapEnvSource{LocalObjectReference: corev1.LocalObjectReference{Name: "kud-installer"}},
// Copies the mounted SSH key into /root/.ssh before running the installer.
666 Command: []string{"/bin/sh", "-c"},
667 Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
668 SecurityContext: &corev1.SecurityContext{
673 Volumes: []corev1.Volume{{
674 Name: "multi-cluster",
675 VolumeSource: corev1.VolumeSource{
676 HostPath: &corev1.HostPathVolumeSource{
677 Path: "/opt/kud/multi-cluster",
680 Name: "secret-volume",
681 VolumeSource: corev1.VolumeSource{
682 Secret: &corev1.SecretVolumeSource{
683 SecretName: "ssh-key-secret",
686 RestartPolicy: "Never",
689 BackoffLimit: &backOffLimit,
692 _, err := jobClient.Create(job)
694 fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
701 //Function to Check if job succeeded
// Runs as a goroutine: polls the "kud-<clusterName>" Job every 2s until it
// reports Succeeded or Failed. On success it persists the cluster's
// label→IP map as a ConfigMap via createConfigMap.
// NOTE(review): no context/timeout — if the Job never reaches a terminal
// state this goroutine polls forever.
702 func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
704 fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
705 jobName := "kud-" + clusterName
706 jobClient := clientset.BatchV1().Jobs(namespace)
709 time.Sleep(2 * time.Second)
711 job, err := jobClient.Get(jobName, metav1.GetOptions{})
713 fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
716 jobSucceeded := job.Status.Succeeded
717 jobFailed := job.Status.Failed
719 if jobSucceeded == 1 {
720 fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
722 //KUD was installed successfully create configmap to store IP address info for the cluster
723 err = createConfigMap(data, labels, namespace, clientset)
725 fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
732 fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
741 //Function to get software list from software CR
// Returns the target cluster name (from the "cluster" label) and the
// master/worker software lists from the Software CR spec.
// NOTE(review): local name "masterSofwareList" has a typo (Sofware).
742 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
744 CRclusterName := softwareCR.GetLabels()["cluster"]
746 masterSofwareList := softwareCR.Spec.MasterSoftware
747 workerSoftwareList := softwareCR.Spec.WorkerSoftware
749 return CRclusterName, masterSofwareList, workerSoftwareList
752 //Function to install software in clusterHosts
// Flattens softwareList into one space-separated apt package string. Each
// entry is either a plain string ("curl") or a map of name → {version: v},
// rendered as "name=version". The combined string is installed over SSH
// via sshInstaller.
753 func softwareInstaller(ipAddress, sshPrivateKey string, softwareList []interface{}) error {
755 var installString string
756 for _, software := range softwareList {
758 switch t := software.(type) {
760 installString += software.(string) + " "
762 softwareMap, errBool := software.(map[string]interface{})
764 fmt.Printf("Error occured, cannot install software %v\n", software)
766 for softwareName, versionMap := range softwareMap {
// NOTE(review): versionMAP["version"].(string) is an unchecked assertion;
// a missing or non-string "version" key would panic — confirm CR schema
// guarantees it.
768 versionMAP, _ := versionMap.(map[string]interface{})
769 version := versionMAP["version"].(string)
770 installString += softwareName + "=" + version + " "
773 fmt.Printf("invalid format %v\n", t)
778 err := sshInstaller(installString, sshPrivateKey, ipAddress)
786 //Function to Run Installation commands via ssh
// Dials <ipAddress>:22 with the given private key and runs an apt-get
// install of softwareString in a single session.
// SECURITY NOTE(review): InsecureIgnoreHostKey disables host key
// verification — acceptable only on a trusted provisioning network.
787 func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
789 buffer, err := ioutil.ReadFile(sshPrivateKey)
794 key, err := ssh.ParsePrivateKey(buffer)
799 sshConfig := &ssh.ClientConfig{
801 Auth: []ssh.AuthMethod{
805 HostKeyCallback: ssh.InsecureIgnoreHostKey(),
808 client, err := ssh.Dial("tcp", ipAddress+":22", sshConfig)
813 session, err := client.NewSession()
818 defer session.Close()
// NOTE(review): only "apt-get update" is under sudo; the chained
// "apt-get install" after && is not — works only if the login user is
// already root (as with the root/root Virtlet credentials).
821 cmd := "sudo apt-get update && apt-get install " + softwareString + "-y"
// NOTE(review): Session.Start does not wait for the command to finish
// (unlike Run); completion/exit status is not observed here unless a
// Wait call exists in the lines not visible in this chunk.
822 err = session.Start(cmd)
// listVirtletVMs scans all pods in all namespaces and returns, for each
// Running pod whose "kubernetes.io/target-runtime" annotation is
// "virtlet.cloud", a VirtletVM{PodIP, MAC} pair, with the MAC taken from
// the first entry of the Multus networks-status annotation.
832 func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
834 var vmPodList []VirtletVM
836 pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
838 fmt.Printf("Could not get pod info, Error: %v\n", err)
839 return []VirtletVM{}, err
842 for _, pod := range pods.Items {
843 var podAnnotation map[string]interface{}
844 var podStatus corev1.PodStatus
845 var podDefaultNetStatus []NetworksStatus
// Round-trips annotations/status through JSON to get loosely-typed maps.
847 annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
849 fmt.Printf("Could not get pod annotations, Error: %v\n", err)
850 return []VirtletVM{}, err
// NOTE(review): Unmarshal errors below are ignored; the unchecked .(string)
// assertions would panic on a non-string annotation value — confirm inputs.
853 json.Unmarshal([]byte(annotation), &podAnnotation)
854 if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
855 runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
857 podStatusJson, _ := json.Marshal(pod.Status)
858 json.Unmarshal([]byte(podStatusJson), &podStatus)
860 if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
861 ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
862 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
// NOTE(review): podDefaultNetStatus[0] panics if the networks-status
// annotation parsed to an empty list — a len check would be safer.
864 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
869 return vmPodList, nil
872 func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
874 for i := 0; i < len(vmList); i++ {
875 if vmList[i].MACaddress == macAddress {
876 return vmList[i].IPaddress, nil