X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=blobdiff_plain;f=cmd%2Fbpa-operator%2Fpkg%2Fcontroller%2Fprovisioning%2Fprovisioning_controller.go;h=b4e95779ecd7defa3cfd77f28763115c32ab9054;hb=refs%2Fchanges%2F65%2F1865%2F33;hp=4822db8430ed1232bff8aecc50cc9c197d53b22b;hpb=7d22c318f4f0a1d96ba852ec7fca6559e8a7ca05;p=icn.git diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go index 4822db8..b4e9577 100644 --- a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go +++ b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go @@ -9,7 +9,7 @@ import ( "regexp" "strings" "io/ioutil" - + "encoding/json" bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,7 +20,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/dynamic" - "k8s.io/client-go/rest" "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,6 +34,20 @@ import ( "golang.org/x/crypto/ssh" ) +type VirtletVM struct { + IPaddress string + MACaddress string +} + +type NetworksStatus struct { + Name string `json:"name,omitempty"` + Interface string `json:"interface,omitempty"` + Ips []string `json:"ips,omitempty"` + Mac string `json:"mac,omitempty"` + Default bool `json:"default,omitempty"` + Dns interface{} `json:"dns,omitempty"` +} + var log = logf.Log.WithName("controller_provisioning") /** @@ -50,7 +63,23 @@ func Add(mgr manager.Manager) error { // newReconciler returns a new reconcile.Reconciler func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()} + + config, err := config.GetConfig() + if err != nil { + fmt.Printf("Could not get kube config, Error: %v\n", err) + } + + clientSet, err := kubernetes.NewForConfig(config) + if err != nil { + fmt.Printf("Could not create clientset, Error: %v\n", err) + } + bmhDynamicClient, err := dynamic.NewForConfig(config) + + if err != nil { + fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err) + } + + return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient } } // add adds a new Controller to mgr with r as the reconcile.Reconciler @@ -87,7 +116,6 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { return err } - // Watch for changes to resource software CR err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{}) if err != nil { @@ -95,7 +123,6 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error { } - return nil } @@ -108,6 +135,8 @@ type ReconcileProvisioning struct { // that reads objects from the cache and writes to the apiserver client client.Client scheme *runtime.Scheme + clientset kubernetes.Interface + bmhClient dynamic.Interface } // Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read @@ -119,7 +148,6 @@ type ReconcileProvisioning struct { // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 
func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) { reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - //reqLogger.Info("Reconciling Provisioning") fmt.Printf("\n\n") reqLogger.Info("Reconciling Custom Resource") @@ -154,53 +182,30 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. masterTag := "MASTER_" workerTag := "WORKER_" - config, err := config.GetConfig() - if err != nil { - fmt.Printf("Could not get kube config, Error: %v\n", err) - return reconcile.Result{}, err - } - - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - fmt.Printf("Could not create clientset, Error: %v\n", err) - return reconcile.Result{}, err - } if provisioningCreated { /////////////////////////////////////////////////////////////////////////////////////////////// //////////////// Provisioning CR was created so install KUD ///////////////// ////////////////////////////////////////////////////////////////////////////////////////////// clusterName := provisioningInstance.Labels["cluster"] + clusterType := provisioningInstance.Labels["cluster-type"] mastersList := provisioningInstance.Spec.Masters workersList := provisioningInstance.Spec.Workers - dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile - kudInstallerScript := provisioningInstance.Spec.KUDInstaller - multiClusterDir := provisioningInstance.Spec.MultiClusterPath + kudPlugins := provisioningInstance.Spec.KUDPlugins + + + bareMetalHostList, _ := listBareMetalHosts(r.bmhClient) + virtletVMList, _ := listVirtletVMs(r.clientset) - bareMetalHostList, _ := listBareMetalHosts(config) var allString string var masterString string var workerString string - defaultDHCPFile := "/var/lib/dhcp/dhcpd.leases" - defaultKUDInstallerPath := "/multicloud-k8s/kud/hosting_providers/vagrant" - defaultMultiClusterDir := "/multi-cluster" - - //Give Default values for paths if no path is given in the CR - if dhcpLeaseFile == "" { - dhcpLeaseFile = defaultDHCPFile - } - - if kudInstallerScript == "" { - kudInstallerScript = defaultKUDInstallerPath - } - - if multiClusterDir == "" { - multiClusterDir = defaultMultiClusterDir - } + dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases" + multiClusterDir := "/multi-cluster" //Create Directory for the specific cluster clusterDir := multiClusterDir + "/" + clusterName @@ -218,39 +223,60 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. 
for masterLabel, master := range masterMap { masterMAC := master.MACaddress + hostIPaddress := "" if masterMAC == "" { err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel) return reconcile.Result{}, err } + containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC) + + //Check 'cluster-type' label for Virtlet VMs + if clusterType == "virtlet-vm" { + //Get VM IP address of master + hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC) + if err != nil || hostIPaddress == "" { + err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC) + return reconcile.Result{}, err + } + containsMac = true + } + if containsMac{ - fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC) - //Get IP address of master - hostIPaddress, err := getHostIPaddress(masterMAC, dhcpLeaseFile ) - if err != nil || hostIPaddress == ""{ - err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC) - return reconcile.Result{}, err - } + if clusterType != "virtlet-vm" { + fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC) + + //Get IP address of master + hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile ) + if err != nil || hostIPaddress == ""{ + err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC) + return reconcile.Result{}, err + } + } - allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" - masterString += masterLabel + "\n" - clusterData[masterTag + masterLabel] = hostIPaddress + allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" + if clusterType == "virtlet-vm" { + allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" + } + masterString += masterLabel + "\n" + clusterData[masterTag + masterLabel] = hostIPaddress - fmt.Printf("%s : %s \n", hostIPaddress, masterMAC) + fmt.Printf("%s : %s \n", hostIPaddress, masterMAC) - if len(workersList) != 0 { + if len(workersList) != 0 { - //Iterate through workersList and get all the mac addresses - for _, workerMap := range workersList { + //Iterate through workersList and get all the mac addresses + for _, workerMap := range workersList { - //Get worker labels from the workermap - for workerLabel, worker := range workerMap { + //Get worker labels from the workermap + for workerLabel, worker := range workerMap { - //Check if workerString already contains worker label - containsWorkerLabel := strings.Contains(workerString, workerLabel) - workerMAC := worker.MACaddress + //Check if workerString already contains worker label + containsWorkerLabel := strings.Contains(workerString, workerLabel) + workerMAC := worker.MACaddress + hostIPaddress = "" //Error occurs if the same label is given to different hosts (assumption, //each MAC address represents a unique host @@ -289,20 +315,35 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. 
} containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC) - if containsMac{ - fmt.Printf("Host %s matches that macAddress\n", bmhCR) - //Get IP address of worker - hostIPaddress, err := getHostIPaddress(workerMAC, dhcpLeaseFile ) - if err != nil { - fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC) - return reconcile.Result{}, err - } - fmt.Printf("%s : %s \n", hostIPaddress, workerMAC) + if clusterType == "virtlet-vm" { + //Get VM IP address of master + hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC) + if err != nil || hostIPaddress == "" { + err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC) + return reconcile.Result{}, err + } + containsMac = true + } + + if containsMac{ + if clusterType != "virtlet-vm" { + fmt.Printf("Host %s matches that macAddress\n", bmhCR) + //Get IP address of worker + hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile ) + if err != nil { + fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC) + return reconcile.Result{}, err + } + } + fmt.Printf("%s : %s \n", hostIPaddress, workerMAC) allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" + if clusterType == "virtlet-vm" { + allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" + } workerString += workerLabel + "\n" clusterData[workerTag + workerLabel] = hostIPaddress @@ -312,9 +353,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC) return reconcile.Result{}, err } - } - - } + } + } } //No worker node specified, add master as worker node } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) { @@ -374,6 +414,24 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, err } + _, err = hostFile.NewRawSection("ovn-central", masterString) + if err != nil { + fmt.Printf("Error occured while creating section \n %v", err) + return reconcile.Result{}, err + } + + _, err = hostFile.NewRawSection("ovn-controller", workerString) + if err != nil { + fmt.Printf("Error occured while creating section \n %v", err) + return reconcile.Result{}, err + } + + _, err = hostFile.NewRawSection("virtlet", workerString) + if err != nil { + fmt.Printf("Error occured while creating section \n %v", err) + return reconcile.Result{}, err + } + _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master") if err != nil { fmt.Printf("Error occured while creating section \n %v", err) @@ -385,7 +443,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. hostFile.SaveTo(iniHostFilePath) //Install KUD - err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, clientset) + err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset) if err != nil { fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err) return reconcile.Result{}, err @@ -393,7 +451,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. 
//Start separate thread to keep checking job status, Create an IP address configmap //for cluster if KUD is successfully installed - go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, clientset) + go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset) return reconcile.Result{}, nil @@ -408,7 +466,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. defaultSSHPrivateKey := "/root/.ssh/id_rsa" //Get IP address configmap for the cluster - clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, clientset) + clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset) if err != nil { fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err) return reconcile.Result{}, err @@ -436,17 +494,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, nil } - //Function to Get List containing baremetal hosts -func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, error) { - - //Create Dynamic Client for BareMetalHost CRD - bmhDynamicClient, err := dynamic.NewForConfig(config) - - if err != nil { - fmt.Println("Could not create dynamic client for bareMetalHosts, Error: %v\n", err) - return &unstructured.UnstructuredList{}, err - } +func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) { //Create GVR representing a BareMetalHost CR bmhGVR := schema.GroupVersionResource{ @@ -458,7 +507,7 @@ func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, er //Get List containing all BareMetalHosts CRs bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{}) if err != nil { - fmt.Println("Error occured, cannot get BareMetalHosts list, Error: %v\n", err) + fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err) return &unstructured.UnstructuredList{}, err } @@ -494,7 +543,7 @@ func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, err //Read the dhcp lease file dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath) if err != nil { - fmt.Println("Failed to read lease file\n") + fmt.Printf("Failed to read lease file\n") return "", err } @@ -504,7 +553,7 @@ func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, err reg := "lease.*{|ethernet.*|\n. 
binding state.*" re, err := regexp.Compile(reg) if err != nil { - fmt.Println("Could not create Regexp object, Error %v occured\n", err) + fmt.Printf("Could not create Regexp object, Error %v occured\n", err) return "", err } @@ -537,7 +586,7 @@ func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, err } //Function to create configmap -func createConfigMap(data, labels map[string]string, namespace string, clientset *kubernetes.Clientset) error{ +func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error{ configmapClient := clientset.CoreV1().ConfigMaps(namespace) @@ -561,7 +610,7 @@ func createConfigMap(data, labels map[string]string, namespace string, clientset } //Function to get configmap Data -func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clientset) (map[string]string, error) { +func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) { configmapClient := clientset.CoreV1().ConfigMaps(namespace) configmapName := clusterName + "-configmap" @@ -574,13 +623,25 @@ func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clien return configmapData, nil } - //Function to create job for KUD installation -func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset *kubernetes.Clientset) error{ +func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{ var backOffLimit int32 = 0 var privi bool = true + installerString := " ./installer --cluster " + clusterName + + // Check if any plugin was specified + if len(kudPlugins) > 0 { + plugins := " --plugins" + + for _, plug := range kudPlugins { + plugins += " " + plug + } + + installerString += plugins + } + jobClient := clientset.BatchV1().Jobs("default") @@ -614,7 +675,7 @@ func createKUDinstallerJob(clusterName, namespace string, labels map[string]stri }, Command: []string{"/bin/sh","-c"}, - Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster " + clusterName}, + Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString}, SecurityContext: &corev1.SecurityContext{ Privileged : &privi, @@ -653,7 +714,7 @@ func createKUDinstallerJob(clusterName, namespace string, labels map[string]stri } //Function to Check if job succeeded -func checkJob(clusterName, namespace string, data, labels map[string]string, clientset *kubernetes.Clientset) { +func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) { fmt.Printf("\nChecking job status for cluster %s\n", clusterName) jobName := "kud-" + clusterName @@ -691,6 +752,7 @@ func checkJob(clusterName, namespace string, data, labels map[string]string, cli return } + //Function to get software list from software CR func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) { @@ -781,3 +843,53 @@ func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error { return nil } + +func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) { + + var vmPodList []VirtletVM + + pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{}) + if err != nil { + fmt.Printf("Could not get pod info, Error: %v\n", err) + return []VirtletVM{}, err + } + + for _, pod := range pods.Items { + var podAnnotation map[string]interface{} + var podStatus corev1.PodStatus + var 
podDefaultNetStatus []NetworksStatus + + annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations()) + if err != nil { + fmt.Printf("Could not get pod annotations, Error: %v\n", err) + return []VirtletVM{}, err + } + + json.Unmarshal([]byte(annotation), &podAnnotation) + if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil { + runtime := podAnnotation["kubernetes.io/target-runtime"].(string) + + podStatusJson, _ := json.Marshal(pod.Status) + json.Unmarshal([]byte(podStatusJson), &podStatus) + + if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil { + ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string) + json.Unmarshal([]byte(ns), &podDefaultNetStatus) + + vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac}) + } + } + } + + return vmPodList, nil +} + +func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) { + + for i := 0; i < len(vmList); i++ { + if vmList[i].MACaddress == macAddress { + return vmList[i].IPaddress, nil + } + } + return "", nil +}
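
The newReconciler hunk above moves client construction out of Reconcile: the typed clientset and the dynamic client are built once and stored on ReconcileProvisioning, and listBareMetalHosts now just receives that dynamic client. Below is a minimal standalone sketch of that setup, assuming the metal3.io/v1alpha1 baremetalhosts GVR (the actual group/version/resource values sit outside the visible hunks); the package wrapper and simplified error handling are illustrative, not part of the change.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/kubernetes"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
	// Load the kubeconfig/in-cluster config the same way newReconciler does.
	cfg, err := config.GetConfig()
	if err != nil {
		fmt.Printf("Could not get kube config, Error: %v\n", err)
		return
	}

	// Typed clientset, used later for pods, configmaps and the KUD installer job.
	clientSet, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		fmt.Printf("Could not create clientset, Error: %v\n", err)
		return
	}
	_ = clientSet // held on the reconciler struct in the controller

	// Dynamic client, needed because BareMetalHost is a CRD with no typed client here.
	bmhClient, err := dynamic.NewForConfig(cfg)
	if err != nil {
		fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
		return
	}

	// GVR for the BareMetalHost CR; the metal3 values are an assumption,
	// they are not shown in the visible hunks.
	bmhGVR := schema.GroupVersionResource{
		Group:    "metal3.io",
		Version:  "v1alpha1",
		Resource: "baremetalhosts",
	}

	bareMetalHosts, err := bmhClient.Resource(bmhGVR).List(metav1.ListOptions{})
	if err != nil {
		fmt.Printf("Cannot get BareMetalHosts list, Error: %v\n", err)
		return
	}

	for _, host := range bareMetalHosts.Items {
		fmt.Printf("Found BareMetalHost %s\n", host.GetName())
	}
}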
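
The inventory hunks compose an ansible host line per node and write the groups into hosts.ini as raw sections, with three new groups (ovn-central, ovn-controller, virtlet) added alongside the existing ones. The sketch below isolates that assembly using go-ini (gopkg.in/ini.v1, the library behind NewRawSection and SaveTo); host labels, IP addresses and the output path are placeholders, and the all/kube-master/kube-node sections are assumed from context since they sit outside the visible hunks.

package main

import (
	"fmt"

	ini "gopkg.in/ini.v1"
)

// hostLine mirrors the allString handling: bare-metal hosts get a plain
// ansible_ssh_host entry, while virtlet-vm clusters additionally get the fixed
// root/root SSH credentials the VMs are provisioned with.
func hostLine(label, ip, clusterType string) string {
	line := label + " ansible_ssh_host=" + ip + " ansible_ssh_port=22"
	if clusterType == "virtlet-vm" {
		line += " ansible_ssh_user=root ansible_ssh_pass=root"
	}
	return line + "\n"
}

func main() {
	clusterType := "virtlet-vm" // taken from the Provisioning CR's cluster-type label

	allString := hostLine("master-0", "10.10.10.3", clusterType) +
		hostLine("worker-0", "10.10.10.4", clusterType)
	masterString := "master-0\n"
	workerString := "worker-0\n"

	hostFile := ini.Empty()

	// Raw sections in the order the controller writes them, including the
	// ovn-central, ovn-controller and virtlet groups added by this change.
	sections := []struct{ name, body string }{
		{"all", allString},
		{"kube-master", masterString},
		{"kube-node", workerString},
		{"ovn-central", masterString},
		{"ovn-controller", workerString},
		{"virtlet", workerString},
		{"k8s-cluster:children", "kube-node\nkube-master"},
	}

	for _, s := range sections {
		if _, err := hostFile.NewRawSection(s.name, s.body); err != nil {
			fmt.Printf("Error occurred while creating section %s: %v\n", s.name, err)
			return
		}
	}

	// The controller saves to <multiClusterDir>/<clusterName>/hosts.ini.
	if err := hostFile.SaveTo("/tmp/hosts.ini"); err != nil {
		fmt.Printf("Error occurred while saving the hosts file: %v\n", err)
	}
}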
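
For bare-metal nodes the controller still resolves each MAC address to an IP by scanning /var/lib/dhcp/dhcpd.leases in getHostIPaddress; only a fragment of its regular expression is visible in the hunks. The sketch below is a simplified stand-in for that lookup, assuming standard isc-dhcpd lease blocks ("lease <ip> { ... hardware ethernet <mac>; ... }"); it is not the controller's exact parsing code.

package main

import (
	"fmt"
	"io/ioutil"
	"regexp"
	"strings"
)

// getHostIPaddress pairs each "lease <ip> {" line with the
// "hardware ethernet <mac>;" line inside the same block and returns the IP
// recorded for the requested MAC. The last match wins, since dhcpd appends
// newer leases to the end of the file.
func getHostIPaddress(macAddress, leaseFilePath string) (string, error) {
	data, err := ioutil.ReadFile(leaseFilePath)
	if err != nil {
		fmt.Printf("Failed to read lease file\n")
		return "", err
	}

	leaseRe := regexp.MustCompile(`lease\s+(\S+)\s+\{`)
	macRe := regexp.MustCompile(`hardware\s+ethernet\s+(\S+);`)

	ip := ""
	currentIP := ""
	for _, line := range strings.Split(string(data), "\n") {
		if m := leaseRe.FindStringSubmatch(line); m != nil {
			currentIP = m[1]
		}
		if m := macRe.FindStringSubmatch(line); m != nil && strings.EqualFold(m[1], macAddress) {
			ip = currentIP
		}
	}
	return ip, nil
}

func main() {
	ip, err := getHostIPaddress("08:00:27:aa:bb:cc", "/var/lib/dhcp/dhcpd.leases")
	if err != nil {
		fmt.Printf("Could not resolve MAC to IP: %v\n", err)
		return
	}
	fmt.Printf("Host IP: %s\n", ip)
}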
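
createKUDinstallerJob now takes the kudPlugins list from the CR spec and appends a single "--plugins p1 p2 ..." flag to the installer command baked into the Job's shell Args. The sketch below isolates that argument handling and places it in a minimal batch/v1 Job object; the job name, image and plugin names are placeholders, and only the Command/Args/privileged/backoff handling mirrors the patch.

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// installerArgs reproduces the argument handling: the base installer command
// plus, when the CR listed plugins, a single "--plugins" flag with all values.
func installerArgs(clusterName string, kudPlugins []string) []string {
	installerString := " ./installer --cluster " + clusterName

	if len(kudPlugins) > 0 {
		plugins := " --plugins"
		for _, plug := range kudPlugins {
			plugins += " " + plug
		}
		installerString += plugins
	}

	// Same shell wrapper the Job runs: copy the mounted SSH keys, then install.
	return []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString}
}

func main() {
	args := installerArgs("cluster-a", []string{"pluginA", "pluginB"}) // placeholder plugin names

	privileged := true
	var backOffLimit int32 = 0

	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "kud-cluster-a"},
		Spec: batchv1.JobSpec{
			BackoffLimit: &backOffLimit,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:            "kud",
						Image:           "example/kud-installer:latest", // placeholder image
						Command:         []string{"/bin/sh", "-c"},
						Args:            args,
						SecurityContext: &corev1.SecurityContext{Privileged: &privileged},
					}},
				},
			},
		},
	}

	fmt.Printf("Job %s runs: %v\n", job.Name, job.Spec.Template.Spec.Containers[0].Args)
	// In the controller the object is submitted with
	// clientset.BatchV1().Jobs(namespace).Create(job).
}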
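
For virtlet-vm clusters the controller does not read DHCP leases at all: listVirtletVMs keeps every running pod annotated with kubernetes.io/target-runtime=virtlet.cloud, takes the pod IP as the VM address, and pulls the MAC from the Multus k8s.v1.cni.cncf.io/networks-status annotation; getVMIPaddress then matches the MAC named in the Provisioning CR. The sketch below is a trimmed version of that pair, reading the annotations directly instead of marshalling them through JSON first; it is not the patch's code verbatim, and the MAC in main is only an example value.

package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

type VirtletVM struct {
	IPaddress  string
	MACaddress string
}

type NetworksStatus struct {
	Name      string      `json:"name,omitempty"`
	Interface string      `json:"interface,omitempty"`
	Ips       []string    `json:"ips,omitempty"`
	Mac       string      `json:"mac,omitempty"`
	Default   bool        `json:"default,omitempty"`
	Dns       interface{} `json:"dns,omitempty"`
}

// listVirtletVMs keeps every running pod whose runtime annotation marks it as
// a Virtlet VM and records its pod IP plus the MAC of its first interface.
func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
	var vms []VirtletVM

	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		fmt.Printf("Could not get pod info, Error: %v\n", err)
		return nil, err
	}

	for _, pod := range pods.Items {
		ann := pod.GetAnnotations()
		if ann["kubernetes.io/target-runtime"] != "virtlet.cloud" ||
			pod.Status.Phase != corev1.PodRunning ||
			ann["k8s.v1.cni.cncf.io/networks-status"] == "" {
			continue
		}

		var nets []NetworksStatus
		if err := json.Unmarshal([]byte(ann["k8s.v1.cni.cncf.io/networks-status"]), &nets); err != nil || len(nets) == 0 {
			continue
		}

		vms = append(vms, VirtletVM{IPaddress: pod.Status.PodIP, MACaddress: nets[0].Mac})
	}

	return vms, nil
}

// getVMIPaddress returns the VM IP recorded for the MAC named in the CR.
func getVMIPaddress(vms []VirtletVM, macAddress string) string {
	for _, vm := range vms {
		if vm.MACaddress == macAddress {
			return vm.IPaddress
		}
	}
	return ""
}

func main() {
	// In the controller these run against r.clientset; building a clientset is
	// shown in the first sketch, so only a placeholder is used here.
	var clientset kubernetes.Interface
	if clientset == nil {
		return
	}
	vms, err := listVirtletVMs(clientset)
	if err != nil {
		return
	}
	fmt.Println(getVMIPaddress(vms, "0a:58:0a:f4:01:03")) // example MAC
}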