Add e2e script for Virtlet VM provisioning
diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
index f2d5206..b4e9577 100644
--- a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
+++ b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
@@ -191,9 +191,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
        clusterType := provisioningInstance.Labels["cluster-type"]
         mastersList := provisioningInstance.Spec.Masters
         workersList := provisioningInstance.Spec.Workers
-        dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile
-        kudInstallerScript := provisioningInstance.Spec.KUDInstaller
-       multiClusterDir := provisioningInstance.Spec.MultiClusterPath
+        kudPlugins := provisioningInstance.Spec.KUDPlugins
 
 
         bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
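
Note: the Provisioning CR no longer carries the DHCP lease file, KUD installer path or multi-cluster path; it carries a plugin list instead. A minimal sketch of the corresponding spec type (the JSON tag and the elided fields are assumptions; only KUDPlugins is confirmed by the controller code above):

    // ProvisioningSpec sketch -- not the actual types file.
    type ProvisioningSpec struct {
            // ...Masters/Workers and any other existing fields unchanged...
            KUDPlugins []string `json:"kudPlugins,omitempty"` // replaces DHCPleaseFile, KUDInstaller and MultiClusterPath
    }
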
@@ -206,22 +204,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         var masterString string
         var workerString string
 
-       defaultDHCPFile := "/var/lib/dhcp/dhcpd.leases"
-       defaultKUDInstallerPath := "/multicloud-k8s/kud/hosting_providers/vagrant"
-       defaultMultiClusterDir := "/multi-cluster"
-
-       //Give Default values for paths if no path is given in the CR
-       if dhcpLeaseFile == "" {
-          dhcpLeaseFile = defaultDHCPFile
-       }
-
-       if kudInstallerScript == "" {
-          kudInstallerScript = defaultKUDInstallerPath
-       }
-
-       if multiClusterDir == "" {
-          multiClusterDir = defaultMultiClusterDir
-       }
+       dhcpLeaseFile := "/var/lib/dhcp/dhcpd.leases"
+       multiClusterDir := "/multi-cluster"
 
        //Create Directory for the specific cluster
        clusterDir := multiClusterDir + "/" + clusterName
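
The lease file and multi-cluster directory are now fixed defaults rather than CR-supplied paths. For illustration, with a hypothetical cluster name the per-cluster directory resolves as in this sketch:

    package main

    import "fmt"

    func main() {
            multiClusterDir := "/multi-cluster"
            clusterName := "cluster-a" // hypothetical; taken from the Provisioning CR at runtime
            clusterDir := multiClusterDir + "/" + clusterName
            fmt.Println(clusterDir) // /multi-cluster/cluster-a
    }
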
@@ -273,6 +257,9 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                       }
 
                        allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                       if clusterType == "virtlet-vm" {
+                           allString = masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                       }
                        masterString += masterLabel + "\n"
                        clusterData[masterTag + masterLabel] = hostIPaddress
 
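
For a virtlet-vm cluster the master's inventory entry is rewritten with fixed root credentials instead of relying on the injected SSH key. A small, self-contained illustration of the line this branch produces (host label and IP are hypothetical):

    package main

    import "fmt"

    func main() {
            masterLabel, hostIPaddress := "master-0", "10.10.10.3" // hypothetical values
            entry := masterLabel + "  ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" +
                    " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
            fmt.Print(entry)
            // master-0  ansible_ssh_host=10.10.10.3 ansible_ssh_port=22 ansible_ssh_user=root ansible_ssh_pass=root
    }
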
@@ -353,8 +340,10 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                           }
                                            fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
 
-
                                            allString += workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                                           if clusterType == "virtlet-vm" {
+                                               allString = workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                                           }
                                            workerString += workerLabel + "\n"
                                           clusterData[workerTag + workerLabel] = hostIPaddress
 
@@ -425,17 +414,22 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
            return reconcile.Result{}, err
         }
 
-        if clusterType == "virtlet-vm" {
-                _, err = hostFile.NewRawSection("ovn-central", masterString)
-                if err != nil {
-                        fmt.Printf("Error occured while creating section \n %v", err)
-                        return reconcile.Result{}, err
-                }
-                _, err = hostFile.NewRawSection("ovn-controller", masterString)
-                if err != nil {
-                        fmt.Printf("Error occured while creating section \n %v", err)
-                        return reconcile.Result{}, err
-                }
+        _, err = hostFile.NewRawSection("ovn-central", masterString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+           return reconcile.Result{}, err
+        }
+
+        _, err = hostFile.NewRawSection("ovn-controller", workerString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+           return reconcile.Result{}, err
+        }
+
+        _, err = hostFile.NewRawSection("virtlet", workerString)
+        if err != nil {
+           fmt.Printf("Error occurred while creating section \n %v", err)
+           return reconcile.Result{}, err
         }
 
         _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
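
With the cluster-type check removed, the ovn-central, ovn-controller and virtlet sections are written for every cluster. NewRawSection (go-ini) stores its body verbatim under the section header, so the generated inventory ends up roughly as in this sketch (host labels and the import path are assumptions):

    package main

    import (
            "os"

            ini "gopkg.in/ini.v1"
    )

    func main() {
            masterString, workerString := "master-0\n", "worker-0\n" // hypothetical labels
            hostFile := ini.Empty()
            hostFile.NewRawSection("ovn-central", masterString)
            hostFile.NewRawSection("ovn-controller", workerString)
            hostFile.NewRawSection("virtlet", workerString)
            hostFile.NewRawSection("k8s-cluster:children", "kube-node\n"+"kube-master")
            hostFile.WriteTo(os.Stdout)
            // Roughly:
            // [ovn-central]
            // master-0
            // [ovn-controller]
            // worker-0
            // [virtlet]
            // worker-0
            // [k8s-cluster:children]
            // kube-node
            // kube-master
    }
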
@@ -449,7 +443,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         hostFile.SaveTo(iniHostFilePath)
 
         //Install KUD
-        err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, r.clientset)
+        err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset)
         if err != nil {
            fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
            return reconcile.Result{}, err
@@ -630,11 +624,24 @@ func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interf
 }
 
 //Function to create job for KUD installation
-func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset kubernetes.Interface) error{
+func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{
 
     var backOffLimit int32 = 0
     var privi bool = true
 
+    installerString := " ./installer --cluster " + clusterName
+
+    // Check if any plugin was specified
+    if len(kudPlugins) > 0 {
+        plugins := " --plugins"
+
+        for _, plug := range kudPlugins {
+            plugins += " " + plug
+        }
+
+        installerString += plugins
+    }
+
 
     jobClient := clientset.BatchV1().Jobs("default")
 
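
createKUDinstallerJob now folds the requested plugins into the installer invocation that later becomes part of the Job container's shell command. A sketch of the assembled string for hypothetical inputs:

    package main

    import "fmt"

    func main() {
            clusterName := "cluster-a"        // hypothetical
            kudPlugins := []string{"virtlet"} // hypothetical plugin list from the CR

            installerString := " ./installer --cluster " + clusterName
            if len(kudPlugins) > 0 {
                    plugins := " --plugins"
                    for _, plug := range kudPlugins {
                            plugins += " " + plug
                    }
                    installerString += plugins
            }

            args := "cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString
            fmt.Println(args)
            // cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster cluster-a --plugins virtlet
    }
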
@@ -668,7 +675,7 @@ func createKUDinstallerJob(clusterName, namespace string, labels map[string]stri
 
                                            },
                                            Command: []string{"/bin/sh","-c"},
-                                           Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh; ./installer --cluster " +  clusterName},
+                                           Args: []string{"cp -r /.ssh /root/; chmod -R 600 /root/.ssh;" + installerString},
                                            SecurityContext: &corev1.SecurityContext{
                                                             Privileged : &privi,
 
@@ -865,8 +872,8 @@ func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
                         podStatusJson, _ := json.Marshal(pod.Status)
                         json.Unmarshal([]byte(podStatusJson), &podStatus)
 
-                        if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
-                                ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
+                        if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
+                                ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
                                 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
 
                                 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
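
The VM's MAC address is now read from the Multus k8s.v1.cni.cncf.io/networks-status annotation instead of the old default-network annotation. A standalone sketch of parsing that annotation value, assuming the usual Multus status shape (the struct and its tags are assumptions, not taken from this diff):

    package main

    import (
            "encoding/json"
            "fmt"
    )

    // Assumed shape of one networks-status entry; the controller's own type
    // is expected to look roughly like this.
    type NetworksStatus struct {
            Name      string   `json:"name,omitempty"`
            Interface string   `json:"interface,omitempty"`
            IPs       []string `json:"ips,omitempty"`
            Mac       string   `json:"mac,omitempty"`
            Default   bool     `json:"default,omitempty"`
    }

    func main() {
            // Hypothetical annotation value.
            ns := `[{"name":"default","interface":"eth0","ips":["10.244.1.7"],"mac":"0a:58:0a:f4:01:07","default":true}]`

            var podDefaultNetStatus []NetworksStatus
            if err := json.Unmarshal([]byte(ns), &podDefaultNetStatus); err != nil {
                    fmt.Printf("failed to parse networks-status: %v\n", err)
                    return
            }
            fmt.Println(podDefaultNetStatus[0].Mac) // 0a:58:0a:f4:01:07
    }
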
@@ -886,4 +893,3 @@ func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
         }
         return "", nil
 }
-