Merge "Add 'jobs' resource to role.yaml" into dev/icn-v0.2.1
author Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
Wed, 23 Oct 2019 20:35:55 +0000 (20:35 +0000)
committer Gerrit Code Review <gerrit@akraino.org>
Wed, 23 Oct 2019 20:35:55 +0000 (20:35 +0000)
cmd/bpa-operator/bpa_operator_launch.sh
cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml [new file with mode: 0644]
cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml [new file with mode: 0644]
cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go

diff --git a/cmd/bpa-operator/bpa_operator_launch.sh b/cmd/bpa-operator/bpa_operator_launch.sh
index 7152fc2..d1f1448 100755 (executable)
@@ -12,7 +12,8 @@ make install
 popd
 
 #Copy bpa operator directory to the right path
-kubectl create -f $PWD/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml 
+kubectl create -f $PWD/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_crd.yaml
+kubectl create -f $PWD/deploy/crds/software-crd/bpa_v1alpha1_software_crd.yaml
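+#The operator watches both Provisioning and Software CRs, so both CRDs are created here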
 echo $GOPATH
 mkdir -p $GOPATH/src/github.com/ && cp -r $PWD $GOPATH/src/github.com/bpa-operator
 pushd $GOPATH/src/github.com/bpa-operator
diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_multiple.yaml
new file mode 100644 (file)
index 0000000..be026f8
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: provisioning-test-vm-2
+  labels:
+    cluster: vm-cluster-2
+    cluster-type: virtlet-vm
+    owner: c1
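+    # cluster-type: virtlet-vm tells the provisioning controller to resolve node IP
+    # addresses from running Virtlet VM pods (matched by MAC address) instead of the
+    # DHCP lease file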
+spec:
+  masters:
+    - master-1:
+        mac-address: 00:c6:14:04:61:b2
+  workers:
+    - worker-1:
+         mac-address: 00:c4:13:04:62:b5
diff --git a/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml b/cmd/bpa-operator/deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml
new file mode 100644 (file)
index 0000000..a780297
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: provisioning-test-vm-1
+  labels:
+    cluster: vm-cluster-1
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: c2:b4:57:49:47:f1
diff --git a/cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml b/cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml
new file mode 100644 (file)
index 0000000..4cdf089
--- /dev/null
@@ -0,0 +1,20 @@
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+  name: flannel-vm
+spec:
+  config: '{
+            "cniVersion": "0.3.1",
+            "name" : "flannel-vm",
+            "plugins": [ {
+              "type": "flannel",
+              "cniVersion": "0.3.1",
+              "masterplugin": true,
+              "delegate": {
+                  "isDefaultGateway": true
+              }
+            },
+            {
+              "type": "tuning"
+            }]
+          }'
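+# Virtlet VM pods attach to this network via the v1.multus-cni.io/default-network
+# annotation in virtlet-deployment-sample.yaml, which references it by name ("flannel-vm")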
diff --git a/cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml b/cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml
new file mode 100644 (file)
index 0000000..cbd0122
--- /dev/null
@@ -0,0 +1,68 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: virtlet-deployment
+  labels:
+    app: virtlet
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: virtlet
+  template:
+    metadata:
+      labels:
+        app: virtlet
+      annotations:
+        VirtletLibvirtCPUSetting: |
+          mode: host-passthrough
+        # This tells CRI Proxy that this pod belongs to Virtlet runtime
+        kubernetes.io/target-runtime: virtlet.cloud
+        VirtletCloudInitUserData: |
+          ssh_pwauth: True
+          disable_root: false
+          chpasswd: {expire: False}
+          users:
+          - name: root
+            gecos: User
+            primary-group: root
+            groups: users
+            lock_passwd: false
+            shell: /bin/bash
+            sudo: ALL=(ALL) NOPASSWD:ALL
+            ssh_authorized_keys:
+            # SSH key goes here
+          runcmd:
+            - echo hello world
+        v1.multus-cni.io/default-network: '[
+            { "name": "flannel-vm",
+              "mac": "c2:b4:57:49:47:f1" }]'
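+        # The MAC address above must match the node's mac-address in the Provisioning CR
+        # (the single-VM sample uses the same c2:b4:57:49:47:f1); the BPA operator maps
+        # Virtlet VMs to cluster nodes by MAC address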
+        VirtletRootVolumeSize: 12Gi
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: virtlet-deployment
+        # This specifies the image to use.
+        # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+        # of the image name is prepended with https:// and used to download the image
+        image: virtlet.cloud/ubuntu/18.04
+        imagePullPolicy: IfNotPresent
+        # tty and stdin required for "kubectl attach -t" to work
+        tty: true
+        stdin: true
+        resources:
+          requests:
+            cpu: 2
+            memory: 12Gi
+          limits:
+            # This memory limit is applied to the libvirt domain definition
+            cpu: 2
+            memory: 12Gi
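+# One possible way to exercise these samples (an assumption, not wired into
+# bpa_operator_launch.sh): apply the flannel-vm NetworkAttachmentDefinition and this
+# Deployment, then create one of the provisioning-crd CR samples, e.g.:
+#   kubectl apply -f deploy/netattachdef-flannel-vm.yaml
+#   kubectl apply -f deploy/virtlet-deployment-sample.yaml
+#   kubectl apply -f deploy/crds/provisioning-crd/bpa_v1alpha1_provisioning_cr_vm_single.yaml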
diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
index 4822db8..dd13d88 100644 (file)
@@ -9,7 +9,7 @@ import (
         "regexp"
         "strings"
         "io/ioutil"
-
+        "encoding/json"
 
         bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
         metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,6 +35,20 @@ import (
        "golang.org/x/crypto/ssh"
 )
 
+type VirtletVM struct {
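+        //IP and MAC address of a running Virtlet VM pod, as collected by listVirtletVMs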
+        IPaddress string
+        MACaddress string
+}
+
+type NetworksStatus struct {
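+        //Fields mirror one entry of the pod's v1.multus-cni.io/default-network annotation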
+        Name string `json:"name,omitempty"`
+        Interface string `json:"interface,omitempty"`
+        Ips []string `json:"ips,omitempty"`
+        Mac string `json:"mac,omitempty"`
+        Default bool `json:"default,omitempty"`
+        Dns interface{} `json:"dns,omitempty"`
+}
+
 var log = logf.Log.WithName("controller_provisioning")
 
 /**
@@ -87,7 +101,6 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
                 return err
         }
 
-
         // Watch for changes to resource software CR
         err = c.Watch(&source.Kind{Type: &bpav1alpha1.Software{}}, &handler.EnqueueRequestForObject{})
         if err != nil {
@@ -95,7 +108,6 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
         }
 
 
-
         return nil
 }
 
@@ -171,6 +183,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         ////////////////         Provisioning CR was created so install KUD          /////////////////
         //////////////////////////////////////////////////////////////////////////////////////////////
        clusterName := provisioningInstance.Labels["cluster"]
+       clusterType := provisioningInstance.Labels["cluster-type"]
         mastersList := provisioningInstance.Spec.Masters
         workersList := provisioningInstance.Spec.Workers
         dhcpLeaseFile := provisioningInstance.Spec.DHCPleaseFile
@@ -179,7 +192,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
 
 
         bareMetalHostList, _ := listBareMetalHosts(config)
-
+        virtletVMList, _ := listVirtletVMs()
 
         var allString string
         var masterString string
@@ -218,39 +231,57 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
 
                 for masterLabel, master := range masterMap {
                    masterMAC := master.MACaddress
+                   hostIPaddress := ""
 
                    if masterMAC == "" {
                       err = fmt.Errorf("MAC address for masterNode %s not provided\n", masterLabel)
                       return reconcile.Result{}, err
                    }
+
                    containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
+
+                  //Check 'cluster-type' label for Virtlet VMs
+                  if clusterType == "virtlet-vm" {
+                       //Get VM IP address of master
+                       hostIPaddress, err = getVMIPaddress(virtletVMList, masterMAC)
+                       if err != nil || hostIPaddress == "" {
+                           err = fmt.Errorf("IP address not found for VM with MAC address %s \n", masterMAC)
+                           return reconcile.Result{}, err
+                       }
+                       containsMac = true
+                  }
+
                    if containsMac{
-                      fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
 
-                      //Get IP address of master
-                      hostIPaddress, err := getHostIPaddress(masterMAC, dhcpLeaseFile )
-                      if err != nil || hostIPaddress == ""{
-                        err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
-                        return reconcile.Result{}, err
-                      }
+                      if clusterType != "virtlet-vm" {
+                           fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
+
+                           //Get IP address of master
+                           hostIPaddress, err = getHostIPaddress(masterMAC, dhcpLeaseFile )
+                           if err != nil || hostIPaddress == ""{
+                               err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
+                               return reconcile.Result{}, err
+                           }
+                      }
 
-                      allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
-                      masterString += masterLabel + "\n"
-                     clusterData[masterTag + masterLabel] = hostIPaddress
+                       allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                       masterString += masterLabel + "\n"
+                       clusterData[masterTag + masterLabel] = hostIPaddress
 
-                      fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
+                       fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
 
-                      if len(workersList) != 0 {
+                       if len(workersList) != 0 {
 
-                          //Iterate through workersList and get all the mac addresses
-                          for _, workerMap := range workersList {
+                           //Iterate through workersList and get all the mac addresses
+                           for _, workerMap := range workersList {
 
-                              //Get worker labels from the workermap
-                              for workerLabel, worker := range workerMap {
+                               //Get worker labels from the workermap
+                               for workerLabel, worker := range workerMap {
 
-                                  //Check if workerString already contains worker label
-                                  containsWorkerLabel := strings.Contains(workerString, workerLabel)
-                                  workerMAC := worker.MACaddress
+                                   //Check if workerString already contains worker label
+                                   containsWorkerLabel := strings.Contains(workerString, workerLabel)
+                                   workerMAC := worker.MACaddress
+                                   hostIPaddress = ""
 
                                    //Error occurs if the same label is given to different hosts (assumption,
                                    //each MAC address represents a unique host
@@ -289,17 +320,30 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                          }
 
                                         containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
+
+                                       if clusterType == "virtlet-vm" {
+                                           //Get VM IP address of worker
+                                           hostIPaddress, err = getVMIPaddress(virtletVMList, workerMAC)
+                                           if err != nil || hostIPaddress == "" {
+                                               err = fmt.Errorf("IP address not found for VM with MAC address %s \n", workerMAC)
+                                               return reconcile.Result{}, err
+                                           }
+                                           containsMac = true
+                                       }
+
                                         if containsMac{
-                                           fmt.Printf("Host %s matches that macAddress\n", bmhCR)
-
-                                           //Get IP address of worker
-                                           hostIPaddress, err := getHostIPaddress(workerMAC, dhcpLeaseFile )
-                                           if err != nil {
-                                              fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
-                                              return reconcile.Result{}, err
-                                           }
-                                           fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
 
+                                          if clusterType != "virtlet-vm" {
+                                               fmt.Printf("Host %s matches that macAddress\n", bmhCR)
+
+                                               //Get IP address of worker
+                                               hostIPaddress, err = getHostIPaddress(workerMAC, dhcpLeaseFile )
+                                               if err != nil {
+                                                   err = fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
+                                                   return reconcile.Result{}, err
+                                               }
+                                          }
+                                           fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
 
 
                                            allString += workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
@@ -312,9 +356,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                             err = fmt.Errorf("Host with MAC Address %s not found\n", workerMAC)
                                             return reconcile.Result{}, err
                                           }
-                                     }
-
-                         }
+                                   }
+                             }
                        }
                    //No worker node specified, add master as worker node
                    } else if len(workersList) == 0 && !strings.Contains(workerString, masterLabel) {
@@ -374,6 +417,19 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
            return reconcile.Result{}, err
         }
 
+        if clusterType == "virtlet-vm" {
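+                //For Virtlet VM clusters, the master nodes also serve as the ovn-central
+                //and ovn-controller hosts in the generated inventory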
+                _, err = hostFile.NewRawSection("ovn-central", masterString)
+                if err != nil {
+                        fmt.Printf("Error occurred while creating section \n %v", err)
+                        return reconcile.Result{}, err
+                }
+                _, err = hostFile.NewRawSection("ovn-controller", masterString)
+                if err != nil {
+                        fmt.Printf("Error occurred while creating section \n %v", err)
+                        return reconcile.Result{}, err
+                }
+        }
+
         _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
         if err != nil {
            fmt.Printf("Error occured while creating section \n %v", err)
@@ -436,7 +492,6 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         return reconcile.Result{}, nil
 }
 
-
 //Function to Get List containing baremetal hosts
 func listBareMetalHosts(config *rest.Config) (*unstructured.UnstructuredList, error) {
 
@@ -574,7 +629,6 @@ func getConfigMapData(namespace, clusterName string, clientset *kubernetes.Clien
      return configmapData, nil
 }
 
-
 //Function to create job for KUD installation
 func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, clientset *kubernetes.Clientset) error{
 
@@ -691,6 +745,7 @@ func checkJob(clusterName, namespace string, data, labels map[string]string, cli
     return
 
 }
+
 //Function to get software list from software CR
 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
 
@@ -781,3 +836,66 @@ func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
     return nil
 
 }
+
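+//listVirtletVMs returns the IP and MAC address of every running Virtlet VM pod,
+//taken from the pod status and the Multus default-network annotation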
+func listVirtletVMs() ([]VirtletVM, error) {
+
+        var vmPodList []VirtletVM
+
+        config, err :=  config.GetConfig()
+        if err != nil {
+                fmt.Printf("Could not get kube config, Error: %v\n", err)
+                return []VirtletVM{}, err
+        }
+
+        // create the clientset
+        clientset, err := kubernetes.NewForConfig(config)
+        if err != nil {
+                fmt.Printf("Could not create the client set, Error: %v\n", err)
+                return []VirtletVM{}, err
+        }
+
+        pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+        if err != nil {
+                fmt.Printf("Could not get pod info, Error: %v\n", err)
+                return []VirtletVM{}, err
+        }
+
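+        //A pod is treated as a Virtlet VM when its kubernetes.io/target-runtime
+        //annotation is "virtlet.cloud", it is in the Running phase and it carries a
+        //v1.multus-cni.io/default-network annotation holding the VM's MAC address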
+        for _, pod := range pods.Items {
+                var podAnnotation map[string]interface{}
+                var podStatus corev1.PodStatus
+                var podDefaultNetStatus []NetworksStatus
+
+                annotation, err := json.Marshal(pod.ObjectMeta.GetAnnotations())
+                if err != nil {
+                        fmt.Printf("Could not get pod annotations, Error: %v\n", err)
+                        return []VirtletVM{}, err
+                }
+
+                json.Unmarshal([]byte(annotation), &podAnnotation)
+                if podAnnotation != nil && podAnnotation["kubernetes.io/target-runtime"] != nil {
+                        runtime := podAnnotation["kubernetes.io/target-runtime"].(string)
+
+                        podStatusJson, _ := json.Marshal(pod.Status)
+                        json.Unmarshal([]byte(podStatusJson), &podStatus)
+
+                        if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
+                                ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
+                                json.Unmarshal([]byte(ns), &podDefaultNetStatus)
+
+                                //Skip pods whose default-network annotation could not be parsed
+                                if len(podDefaultNetStatus) > 0 {
+                                        vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
+                                }
+                        }
+                }
+        }
+
+        return vmPodList, nil
+}
+
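+//getVMIPaddress returns the IP address of the Virtlet VM whose MAC address matches
+//macAddress; it returns an empty string (and no error) when no match is found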
+func getVMIPaddress(vmList []VirtletVM, macAddress string) (string, error) {
+
+        for i := 0; i < len(vmList); i++ {
+                if vmList[i].MACaddress == macAddress {
+                        return vmList[i].IPaddress, nil
+                }
+        }
+        return "", nil
+}