"regexp"
"strings"
"io/ioutil"
-
+ "encoding/json"
bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/dynamic"
- "k8s.io/client-go/rest"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
"golang.org/x/crypto/ssh"
)
// VirtletVM holds the network identity of a single Virtlet-managed VM pod:
// the pod's IP address and the MAC address of its default (multus) network
// interface, as harvested by listVirtletVMs.
type VirtletVM struct {
	IPaddress  string
	MACaddress string
}
+
+type NetworksStatus struct {
+ Name string `json:"name,omitempty"`
+ Interface string `json:"interface,omitempty"`
+ Ips []string `json:"ips,omitempty"`
+ Mac string `json:"mac,omitempty"`
+ Default bool `json:"default,omitempty"`
+ Dns interface{} `json:"dns,omitempty"`
+}
+
var log = logf.Log.WithName("controller_provisioning")
/**
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
- return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+
+ config, err := config.GetConfig()
+ if err != nil {
+ fmt.Printf("Could not get kube config, Error: %v\n", err)
+ }
+
+ clientSet, err := kubernetes.NewForConfig(config)
+ if err != nil {
+ fmt.Printf("Could not create clientset, Error: %v\n", err)
+ }
+ bmhDynamicClient, err := dynamic.NewForConfig(config)
+
+ if err != nil {
+ fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
+ }
+
+ return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient }
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
return err
}
-
// Watch for changes to resource software CR
err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
if err != nil {
}
-
return nil
}
// that reads objects from the cache and writes to the apiserver
client client.Client
scheme *runtime.Scheme
+ clientset kubernetes.Interface
+ bmhClient dynamic.Interface
}
// Reconcile reads that state of the cluster for a Provisioning object and makes changes based on the state read
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.Result, error) {
reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
- //reqLogger.Info("Reconciling Provisioning")
fmt.Printf("\n\n")
reqLogger.Info("Reconciling Custom Resource")
masterTag := "MASTER_"
workerTag := "WORKER_"
- config, err := config.GetConfig()
- if err != nil {
- fmt.Printf("Could not get kube config, Error: %v\n", err)
- return reconcile.Result{}, err
- }
-
- clientset, err := kubernetes.NewForConfig(config)
- if err != nil {
- fmt.Printf("Could not create clientset, Error: %v\n", err)
- return reconcile.Result{}, err
- }
if provisioningCreated {
///////////////////////////////////////////////////////////////////////////////////////////////
//////////////// Provisioning CR was created so install KUD /////////////////
//////////////////////////////////////////////////////////////////////////////////////////////
clusterName := provisioningInstance.Labels["cluster"]
+ clusterType := provisioningInstance.Labels["cluster-type"]
mastersList := provisioningInstance.Spec.Masters
workersList := provisioningInstance.Spec.Workers
- dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile
- kudInstallerScript := provisioningInstance.Spec.KUDInstaller
- multiClusterDir := provisioningInstance.Spec.MultiClusterPath
+ kudPlugins := provisioningInstance.Spec.KUDPlugins
+
+
+ bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
+ virtletVMList, _ := listVirtletVMs(r.clientset)
- bareMetalHostList, _ := listBareMetalHosts(config)
var allString string
var masterString string
var workerString string
- defaultDHCPFile := "/var/lib/dhcp/dhcpd.leases"
- defaultKUDInstallerPath := "/multicloud-k8s/kud/hosting_providers/vagrant"
- defaultMultiClusterDir := "/multi-cluster"
-
- //Give Default values for paths if no path is given in the CR
- if dhcpLeaseFile == "" {
- dhcpLeaseFile = defaultDHCPFile
- }
-
- if kudInstallerScript == "" {
- kudInstallerScript = defaultKUDInstallerPath
- }
-
- if multiClusterDir == "" {
- multiClusterDir = defaultMultiClusterDir
- }
+ dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
+ multiClusterDir := "/multi-cluster"
//Create Directory for the specific cluster
clusterDir := multiClusterDir + "/" + clusterName
for masterLabel, master := range masterMap {
masterMAC := master.MACaddress
+ hostIPaddress := ""
if masterMAC == "" {
err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
return reconcile.Result{}, err
}
+
containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
+
+ //Check 'cluster-type' label for Virtlet VMs
+ if clusterType == "virtlet-vm" {
+ //Get VM IP address of master
+ hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
+ if err != nil || hostIPaddress == "" {
+ err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
+ return reconcile.Result{}, err
+ }
+ containsMac = true
+ }
+
if containsMac{
- fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
- //Get IP address of master
- hostIPaddress, err := getHostIPaddress(masterMAC, dhcpLeaseFile )
- if err != nil || hostIPaddress == ""{
- err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
- return reconcile.Result{}, err
- }
+ if clusterType != "virtlet-vm" {
+ fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
+
+ //Get IP address of master
+ hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile )
+ if err != nil || hostIPaddress == ""{
+ err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
+ return reconcile.Result{}, err
+ }
+ }
- allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
- masterString += masterLabel + "\n"
- clusterData[masterTag + masterLabel] = hostIPaddress
+ allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
+ masterString += masterLabel + "\n"
+ clusterData[masterTag + masterLabel] = hostIPaddress
- fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
+ fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
- if len(workersList) != 0 {
+ if len(workersList) != 0 {
- //Iterate through workersList and get all the mac addresses
- for _, workerMap := range workersList {
+ //Iterate through workersList and get all the mac addresses
+ for _, workerMap := range workersList {
- //Get worker labels from the workermap
- for workerLabel, worker := range workerMap {
+ //Get worker labels from the workermap
+ for workerLabel, worker := range workerMap {
- //Check if workerString already contains worker label
- containsWorkerLabel := strings.Contains(workerString, workerLabel)
- workerMAC := worker.MACaddress
+ //Check if workerString already contains worker label
+ containsWorkerLabel := strings.Contains(workerString, workerLabel)
+ workerMAC := worker.MACaddress
+ hostIPaddress = ""
//Error occurs if the same label is given to different hosts (assumption,
//each MAC address represents a unique host
}
containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
+
+ if clusterType == "virtlet-vm" {
+ //Get VM IP address of master
+ hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
+ if err != nil || hostIPaddress == "" {
+ err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
+ return reconcile.Result{}, err
+ }
+ containsMac = true
+ }
+
if containsMac{
- fmt.Printf("Host %s matches that macAddress\n", bmhCR)
-
- //Get IP address of worker
- hostIPaddress, err := getHostIPaddress(workerMAC, dhcpLeaseFile )
- if err != nil {
- fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
- return reconcile.Result{}, err
- }
- fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
+ if clusterType != "virtlet-vm" {
+ fmt.Printf("Host %s matches that macAddress\n", bmhCR)
+
+ //Get IP address of worker
+ hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile )
+ if err != nil {
+ fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
+ return reconcile.Result{}, err
+ }
+ }
+ fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
return reconcile.Result{}, err
}
- }
-
- }
+ }
+ }
}
//No worker node specified, add master as worker node
} else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
return reconcile.Result{}, err
}
+ _, err = hostFile.NewRawSection("ovn-central", masterString)
+ if err != nil {
+ fmt.Printf("Error occured while creating section \n %v", err)
+ return reconcile.Result{}, err
+ }
+
+ _, err = hostFile.NewRawSection("ovn-controller", workerString)
+ if err != nil {
+ fmt.Printf("Error occured while creating section \n %v", err)
+ return reconcile.Result{}, err
+ }
+
+ _, err = hostFile.NewRawSection("virtlet", workerString)
+ if err != nil {
+ fmt.Printf("Error occured while creating section \n %v", err)
+ return reconcile.Result{}, err
+ }
+
_, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
if err != nil {
fmt.Printf("Error occured while creating section \n %v", err)
hostFile.SaveTo(iniHostFilePath)
//Install KUD
- err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, clientset)
+ err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset)
if err != nil {
fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
return reconcile.Result{}, err
//Start separate thread to keep checking job status, Create an IP address configmap
//for cluster if KUD is successfully installed
- go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, clientset)
+ go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
return reconcile.Result{}, nil
defaultSSHPrivateKey := "/root/.ssh/id_rsa"
//Get IP address configmap for the cluster
- clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, clientset)
+ clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
if err != nil {
fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
return reconcile.Result{}, err
return reconcile.Result{}, nil
}
-
//Function to Get List containing baremetal hosts
-func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, error) {
-
- //Create Dynamic Client for BareMetalHost CRD
- bmhDynamicClient, err := dynamic.NewForConfig(config)
-
- if err != nil {
- fmt.Println("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
- return &unstructured.UnstructuredList{}, err
- }
+func listBareMetalHosts(bmhDynamicClient dynamic.Interface) (*unstructured.UnstructuredList, error) {
//Create GVR representing a BareMetalHost CR
bmhGVR := schema.GroupVersionResource{
//Get List containing all BareMetalHosts CRs
bareMetalHosts, err := bmhDynamicClient.Resource(bmhGVR).List(metav1.ListOptions{})
if err != nil {
- fmt.Println("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
+ fmt.Printf("Error occured, cannot get BareMetalHosts list, Error: %v\n", err)
return &unstructured.UnstructuredList{}, err
}
//Read the dhcp lease file
dhcpFile, err := ioutil.ReadFile(dhcpLeaseFilePath)
if err != nil {
- fmt.Println("Failed to read lease file\n")
+ fmt.Printf("Failed to read lease file\n")
return "", err
}
reg := "lease.*{|ethernet.*|\n. binding state.*"
re, err := regexp.Compile(reg)
if err != nil {
- fmt.Println("Could not create Regexp object, Error %v occured\n", err)
+ fmt.Printf("Could not create Regexp object, Error %v occured\n", err)
return "", err
}
}
//Function to create configmap
-func createConfigMap(data, labels map[string]string, namespace string, clientset *kubernetes.Clientset) error{
+func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error{
configmapClient := clientset.CoreV1().ConfigMaps(namespace)
}
//Function to get configmap Data
-func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clientset) (map[string]string, error) {
+func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
configmapClient := clientset.CoreV1().ConfigMaps(namespace)
configmapName := clusterName + "-configmap"
return configmapData, nil
}
-
//Function to create job for KUD installation
-func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset *kubernetes.Clientset) error{
+func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{
var backOffLimit int32 = 0
var privi bool = true
+ installerString := " ./installer --cluster " + clusterName
+
+ // Check if any plugin was specified
+ if len(kudPlugins) > 0 {
+ plugins := " --plugins"
+
+ for _, plug := range kudPlugins {
+ plugins += " " + plug
+ }
+
+ installerString += plugins
+ }
+
jobClient := clientset.BatchV1().Jobs("default")
},
Command: []string{"/bin/sh","-c"},
- Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster " + clusterName},
+ Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
SecurityContext: &corev1.SecurityContext{
Privileged : &privi,
}
//Function to Check if job succeeded
-func checkJob(clusterName, namespace string, data, labels map[string]string, clientset *kubernetes.Clientset) {
+func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
jobName := "kud-" + clusterName
return
}
+
//Function to get software list from software CR
func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
return nil
}
+
+func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
+
+ var vmPodList []VirtletVM
+
+ pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+ if err != nil {
+ fmt.Printf("Could not get pod info, Error: %v\n", err)
+ return []VirtletVM{}, err
+ }
+
+ for _, pod := range pods.Items {
+ var podAnnotation map[string]interface{}
+ var podStatus corev1.PodStatus
+ var podDefaultNetStatus []NetworksStatus
+
+ annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
+ if err != nil {
+ fmt.Printf("Could not get pod annotations, Error: %v\n", err)
+ return []VirtletVM{}, err
+ }
+
+ json.Unmarshal([]byte(annotation), &podAnnotation)
+ if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
+ runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
+
+ podStatusJson, _ := json.Marshal(pod.Status)
+ json.Unmarshal([]byte(podStatusJson), &podStatus)
+
+ if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
+ ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
+ json.Unmarshal([]byte(ns), &podDefaultNetStatus)
+
+ vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
+ }
+ }
+ }
+
+ return vmPodList, nil
+}
+
+func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
+
+ for i := 0; i < len(vmList); i++ {
+ if vmList[i].MACaddress == macAddress {
+ return vmList[i].IPaddress, nil
+ }
+ }
+ return "", nil
+}