Modified BPA controller and fixed bugs 02/2202/1
author Itohan <itohan.ukponmwan@intel.com>
Fri, 17 Jan 2020 18:26:48 +0000 (10:26 -0800)
committer Itohan <itohan.ukponmwan@intel.com>
Fri, 17 Jan 2020 18:26:48 +0000 (10:26 -0800)
- Added configmap to store mac addresses for provisioning CR
- Solved MAC address config issue in multiple provisioning CR
- Changed clientset to client
- Deleting provisioning CR deletes child resources
- Updating provisioning CR will restart KUD job and update
  IP address configmap and MAC address configmap

Issue-ID: ICN-187, ICN-173
Signed-off-by: Itohan Ukponmwan <itohan.ukponmwan@intel.com>
Change-Id: I473dc3e9f03bfe70e4e289a7c9402ec7d910f2ab

cmd/bpa-operator/go.mod
cmd/bpa-operator/go.sum
cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller_test.go
cmd/bpa-operator/vendor/modules.txt
cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go [new file with mode: 0644]
cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go [new file with mode: 0644]

index f7efc3b..9527a6e 100644 (file)
@@ -2,6 +2,7 @@ module github.com/bpa-operator
 
 require (
        github.com/NYTimes/gziphandler v1.0.1 // indirect
+       github.com/metal3-io/baremetal-operator v0.0.0-20191015111357-fc67cc20e40b
        github.com/operator-framework/operator-sdk v0.10.0
        github.com/spf13/pflag v1.0.3
        golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5
index ca72ebe..9dd545e 100644 (file)
@@ -300,6 +300,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
 github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687/go.mod h1:aoVsckWnsNzazwF2kmD+bzgdr4GBlbK91zsdivQJ2eU=
 github.com/metal3-io/baremetal-operator v0.0.0-20191015111357-fc67cc20e40b h1:I28+Qj1D5DwG4uVbxLGooVxNmofzKiN5+g4N09bwPsI=
 github.com/metal3-io/baremetal-operator v0.0.0-20191015111357-fc67cc20e40b/go.mod h1:o9ta8R2EEtSiQY53sXdoM50v5531G0oS+lC58Gcm+1Y=
+github.com/metal3-io/baremetal-operator v0.0.0-20191210182307-a45a8ceef88d h1:czPGW+avZqDHYI4fRDc/IK7UFyR0ksV11DUmnSkJ8SA=
+github.com/metal3-io/baremetal-operator v0.0.0-20191210182307-a45a8ceef88d/go.mod h1:o9ta8R2EEtSiQY53sXdoM50v5531G0oS+lC58Gcm+1Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
index b4e9577..d6946c8 100644 (file)
@@ -19,12 +19,13 @@ import (
         "k8s.io/apimachinery/pkg/runtime/schema"
         "k8s.io/apimachinery/pkg/api/errors"
         "k8s.io/apimachinery/pkg/runtime"
+        "k8s.io/apimachinery/pkg/types"
         "k8s.io/client-go/dynamic"
 
-        "k8s.io/client-go/kubernetes"
         "sigs.k8s.io/controller-runtime/pkg/client"
         "sigs.k8s.io/controller-runtime/pkg/client/config"
         "sigs.k8s.io/controller-runtime/pkg/controller"
+        "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
         "sigs.k8s.io/controller-runtime/pkg/handler"
         "sigs.k8s.io/controller-runtime/pkg/manager"
         "sigs.k8s.io/controller-runtime/pkg/reconcile"
@@ -69,17 +70,13 @@ func newReconciler(mgr manager.Manager) reconcile.Reconciler {
            fmt.Printf("Could not get kube config, Error: %v\n", err)
         }
 
-       clientSet, err := kubernetes.NewForConfig(config)
-        if err != nil {
-           fmt.Printf("Could not create clientset, Error: %v\n", err)
-        }
        bmhDynamicClient, err := dynamic.NewForConfig(config)
 
        if err != nil {
           fmt.Printf("Could not create dynamic client for bareMetalHosts, Error: %v\n", err)
        }
 
-       return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), clientset: clientSet, bmhClient: bmhDynamicClient }
+       return &ReconcileProvisioning{client: mgr.GetClient(), scheme: mgr.GetScheme(), bmhClient: bmhDynamicClient }
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
@@ -135,7 +132,6 @@ type ReconcileProvisioning struct {
         // that reads objects from the cache and writes to the apiserver
         client client.Client
         scheme *runtime.Scheme
-        clientset kubernetes.Interface
         bmhClient dynamic.Interface
 }
 
@@ -187,16 +183,15 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         ///////////////////////////////////////////////////////////////////////////////////////////////
         ////////////////         Provisioning CR was created so install KUD          /////////////////
         //////////////////////////////////////////////////////////////////////////////////////////////
+        provisioningVersion := provisioningInstance.ResourceVersion
        clusterName := provisioningInstance.Labels["cluster"]
        clusterType := provisioningInstance.Labels["cluster-type"]
         mastersList := provisioningInstance.Spec.Masters
         workersList := provisioningInstance.Spec.Workers
-        kudPlugins := provisioningInstance.Spec.KUDPlugins
 
 
         bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
-        virtletVMList, _ := listVirtletVMs(r.clientset)
-
+        virtletVMList, _ := listVirtletVMs(r.client)
 
 
 
@@ -212,9 +207,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
        os.MkdirAll(clusterDir, os.ModePerm)
 
        //Create Maps to be used for cluster ip address to label configmap
-       clusterLabel := make(map[string]string)
-        clusterLabel["cluster"] = clusterName
        clusterData := make(map[string]string)
+       clusterMACData := make(map[string]string)
 
 
 
@@ -230,6 +224,25 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                       return reconcile.Result{}, err
                    }
 
+
+                   // Check if master MAC address has already been used
+                   usedMAC, err := r.macAddressUsed(provisioningInstance.Namespace, masterMAC, clusterName)
+
+
+                   if err != nil {
+
+                      fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
+                      return reconcile.Result{}, err
+                   }
+
+                   if usedMAC {
+
+                      err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", masterMAC)
+                      return reconcile.Result{}, err
+
+                   }
+
+                   // Check if Baremetal host with specified MAC address exist
                    containsMac, bmhCR := checkMACaddress(bareMetalHostList, masterMAC)
 
                   //Check 'cluster-type' label for Virtlet VMs
@@ -243,7 +256,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                        containsMac = true
                   }
 
-                   if containsMac{
+                   if containsMac {
 
                       if clusterType != "virtlet-vm" {
                            fmt.Printf("BareMetalHost CR %s has NIC with MAC Address %s\n", bmhCR, masterMAC)
@@ -262,9 +275,12 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                        }
                        masterString += masterLabel + "\n"
                        clusterData[masterTag + masterLabel] = hostIPaddress
+                       clusterMACData[strings.ReplaceAll(masterMAC, ":", "-")] = masterTag + masterLabel
 
                        fmt.Printf("%s : %s \n", hostIPaddress, masterMAC)
 
+
+                       // Check if any worker MAC address was specified
                        if len(workersList) != 0 {
 
                            //Iterate through workersList and get all the mac addresses
@@ -314,6 +330,22 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                           return reconcile.Result{}, err
                                          }
 
+                                        // Check if worker MAC address has already been used
+                                        usedMAC, err = r.macAddressUsed(provisioningInstance.Namespace, workerMAC, clusterName)
+
+                                        if err != nil {
+
+                                           fmt.Printf("Error occured while checking if mac Address has already been used\n %v", err)
+                                           return reconcile.Result{}, err
+                                        }
+
+                                        if usedMAC {
+
+                                           err = fmt.Errorf("MAC address %s has already been used, check and update provisioning CR", workerMAC)
+                                           return reconcile.Result{}, err
+
+                                        }
+
                                         containsMac, bmhCR := checkMACaddress(bareMetalHostList, workerMAC)
 
                                        if clusterType == "virtlet-vm" {
@@ -346,6 +378,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                            }
                                            workerString += workerLabel + "\n"
                                           clusterData[workerTag + workerLabel] = hostIPaddress
+                                          clusterMACData[strings.ReplaceAll(workerMAC, ":", "-")] = workerTag + workerLabel
 
                                        //No host found that matches the worker MAC
                                        } else {
@@ -374,7 +407,6 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         }
 
         //Create host.ini file
-        //iniHostFilePath := kudInstallerScript + "/inventory/hosts.ini"
         iniHostFilePath := clusterDir + "/hosts.ini"
         newFile, err := os.Create(iniHostFilePath)
         defer newFile.Close()
@@ -442,8 +474,42 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
        //Create host.ini file for KUD
         hostFile.SaveTo(iniHostFilePath)
 
+        // Create configmap to store MAC address info for the cluster
+        cmName := provisioningInstance.Labels["cluster"] + "-mac-addresses"
+        foundConfig := &corev1.ConfigMap{}
+        err = r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: provisioningInstance.Namespace}, foundConfig)
+
+        // Configmap was found but the provisioning CR was updated so update configMap
+        if err == nil && foundConfig.Labels["provisioning-version"] != provisioningVersion {
+
+           foundConfig.Data = clusterMACData
+           foundConfig.Labels =  provisioningInstance.Labels
+           foundConfig.Labels["configmap-type"] = "mac-address"
+           foundConfig.Labels["provisioning-version"] = provisioningVersion
+           err = r.client.Update(context.TODO(), foundConfig)
+           if err != nil {
+              fmt.Printf("Error occured while updating mac address configmap for provisioningCR %s\n ERROR: %v\n", provisioningInstance.Name,
+              err)
+              return reconcile.Result{}, err
+           }
+
+        }  else if err != nil && errors.IsNotFound(err) {
+           labels :=  provisioningInstance.Labels
+           labels["configmap-type"] = "mac-address"
+           labels["provisioning-version"] = provisioningVersion
+           err = r.createConfigMap(provisioningInstance, clusterMACData, labels, cmName)
+           if err != nil {
+              fmt.Printf("Error occured while creating MAC address configmap for cluster %v\n ERROR: %v", clusterName, err)
+              return reconcile.Result{}, err
+            }
+
+        } else if err != nil {
+            fmt.Printf("ERROR occured in Create MAC address Config map section: %v\n", err)
+            return reconcile.Result{}, err
+          }
+
         //Install KUD
-        err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins,  r.clientset)
+        err = r.createKUDinstallerJob(provisioningInstance)
         if err != nil {
            fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
            return reconcile.Result{}, err
@@ -451,7 +517,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
 
         //Start separate thread to keep checking job status, Create an IP address configmap
         //for cluster if KUD is successfully installed
-        go checkJob(clusterName, request.Namespace, clusterData, clusterLabel, r.clientset)
+        go r.checkJob(provisioningInstance, clusterData)
 
         return reconcile.Result{}, nil
 
@@ -466,7 +532,8 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
         defaultSSHPrivateKey := "/root/.ssh/id_rsa"
 
         //Get IP address configmap for the cluster
-        clusterConfigMapData, err := getConfigMapData(request.Namespace, softwareClusterName, r.clientset)
+        configmapName :=  softwareInstance.Labels["cluster"] + "-configmap"
+        clusterConfigMapData, err := r.getConfigMapData(softwareInstance.Namespace, configmapName)
         if err != nil {
            fmt.Printf("Error occured while retrieving IP address Data for cluster %s, ERROR: %v\n", softwareClusterName, err)
            return reconcile.Result{}, err
@@ -536,6 +603,45 @@ func checkMACaddress(bareMetalHostList *unstructured.UnstructuredList, macAddres
 
 }
 
+//Function to check if MAC address has been used in another provisioning CR
+//Returns true if MAC address has already been used
+func (r *ReconcileProvisioning) macAddressUsed(namespace, macAddress, provisioningCluster string) (bool, error) {
+
+     macKey := strings.ReplaceAll(macAddress, ":", "-")
+
+     //Get configmap List
+     cmList := &corev1.ConfigMapList{}
+     label := map[string]string{"configmap-type": "mac-address"}
+     listOpts :=  client.MatchingLabels(label)
+
+     err := r.client.List(context.TODO(), listOpts, cmList)
+     if err != nil {
+        return false, err
+     }
+
+     for _, configmap := range cmList.Items {
+
+        cmCluster := configmap.Labels["cluster"]
+        if cmCluster != provisioningCluster {
+            cmData, err := r.getConfigMapData(namespace, configmap.Name)
+            if err != nil {
+               return false, err
+
+            }
+
+            if _, exist := cmData[macKey]; exist {
+
+               return exist, nil
+            }
+       }
+
+     }
+
+     return false, nil
+
+}
+
+
 
 //Function to get the IP address of a host from the DHCP file
 func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, error) {
@@ -585,36 +691,38 @@ func getHostIPaddress(macAddress string, dhcpLeaseFilePath string ) (string, err
      return "", nil
 }
 
-//Function to create configmap 
-func createConfigMap(data, labels map[string]string, namespace string, clientset kubernetes.Interface) error{
+//Function to create configmap
+func (r *ReconcileProvisioning) createConfigMap(p *bpav1alpha1.Provisioning, data, labels map[string]string, cmName string) error{
 
-     configmapClient := clientset.CoreV1().ConfigMaps(namespace)
+         // Configmap has not been created, create it
+          configmap := &corev1.ConfigMap{
 
-     configmap := &corev1.ConfigMap{
-
-        ObjectMeta: metav1.ObjectMeta{
-                        Name: labels["cluster"] + "-configmap",
+              ObjectMeta: metav1.ObjectMeta{
+                        Name: cmName,
+                       Namespace: p.Namespace,
                         Labels: labels,
-                },
-        Data: data,
-     }
+                      },
+              Data: data,
+          }
 
 
-      _, err := configmapClient.Create(configmap)
-      if err != nil {
-         return err
+         // Set provisioning instance as the owner of the job
+         controllerutil.SetControllerReference(p, configmap, r.scheme)
+         err := r.client.Create(context.TODO(), configmap)
+         if err != nil {
+             return err
 
-      }
-      return nil
+         }
+
+     return nil
 
 }
 
 //Function to get configmap Data
-func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interface) (map[string]string, error) {
+func (r *ReconcileProvisioning) getConfigMapData(namespace, configmapName string) (map[string]string, error) {
 
-     configmapClient := clientset.CoreV1().ConfigMaps(namespace)
-     configmapName := clusterName + "-configmap"
-     clusterConfigmap, err := configmapClient.Get(configmapName, metav1.GetOptions{})
+     clusterConfigmap := &corev1.ConfigMap{}
+     err := r.client.Get(context.TODO(), types.NamespacedName{Name: configmapName, Namespace: namespace}, clusterConfigmap)
      if err != nil {
         return nil, err
      }
@@ -624,37 +732,59 @@ func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interf
 }
 
 //Function to create job for KUD installation
-func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{
+func (r *ReconcileProvisioning) createKUDinstallerJob(p *bpav1alpha1.Provisioning) error{
 
     var backOffLimit int32 = 0
     var privi bool = true
 
-    installerString := " ./installer --cluster " + clusterName
 
-    // Check if any plugin was specified
-    if len(kudPlugins) > 0 {
+    kudPlugins := p.Spec.KUDPlugins
+
+    jobLabel := p.Labels
+    jobLabel["provisioning-version"] = p.ResourceVersion
+    jobName := "kud-" + jobLabel["cluster"]
+
+    // Check if the job already exist
+    foundJob := &batchv1.Job{}
+    err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, foundJob)
+
+    if (err == nil && foundJob.Labels["provisioning-version"] != jobLabel["provisioning-version"]) || (err != nil && errors.IsNotFound(err)) {
+
+       // If err == nil and its in this statement, then provisioning CR was updated, delete job
+       if err == nil {
+          err = r.client.Delete(context.TODO(), foundJob, client.PropagationPolicy(metav1.DeletePropagationForeground))
+          if err != nil {
+             fmt.Printf("Error occured while deleting kud install job for updated provisioning CR %v\n", err)
+             return err
+          }
+       }
+
+       // Job has not been created, create a new kud job
+       installerString := " ./installer --cluster " + p.Labels["cluster"]
+
+       // Check if any plugin was specified
+       if len(kudPlugins) > 0 {
            plugins := " --plugins"
 
            for _, plug := range kudPlugins {
-              plugins += " " + plug
+               plugins += " " + plug
            }
 
           installerString += plugins
-    }
-
-
-    jobClient := clientset.BatchV1().Jobs("default")
+       }
 
-        job := &batchv1.Job{
+       // Define new job
+       job := &batchv1.Job{
 
         ObjectMeta: metav1.ObjectMeta{
-                        Name: "kud-" + clusterName,
-                       Labels: labels,
+                       Name: jobName,
+                       Namespace: p.Namespace,
+                       Labels: jobLabel,
                 },
                 Spec: batchv1.JobSpec{
                       Template: corev1.PodTemplateSpec{
                                 ObjectMeta: metav1.ObjectMeta{
-                                        Labels: labels,
+                                        Labels: p.Labels,
                                 },
 
 
@@ -703,27 +833,36 @@ func createKUDinstallerJob(clusterName, namespace string, labels map[string]stri
                              BackoffLimit : &backOffLimit,
                              },
 
-                         }
-                    _, err := jobClient.Create(job)
-                    if err != nil {
-                       fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
-                       return err
-                    }
-                    return nil
+       }
+
+       // Set provisioning instance as the owner and controller of the job
+       controllerutil.SetControllerReference(p, job, r.scheme)
+       err = r.client.Create(context.TODO(), job)
+       if err != nil {
+          fmt.Printf("ERROR occured while creating job to install KUD\n ERROR:%v", err)
+          return err
+          }
+
+    } else if err != nil {
+         return err
+    }
+
+   return nil
 
 }
 
+
 //Function to Check if job succeeded
-func checkJob(clusterName, namespace string, data, labels map[string]string, clientset kubernetes.Interface) {
+func (r *ReconcileProvisioning) checkJob(p *bpav1alpha1.Provisioning, data map[string]string) {
 
+     clusterName := p.Labels["cluster"]
      fmt.Printf("\nChecking job status for cluster %s\n", clusterName)
      jobName := "kud-" + clusterName
-     jobClient := clientset.BatchV1().Jobs(namespace)
+     job := &batchv1.Job{}
 
      for {
-         time.Sleep(2 * time.Second)
 
-         job, err := jobClient.Get(jobName, metav1.GetOptions{})
+         err := r.client.Get(context.TODO(), types.NamespacedName{Name: jobName, Namespace: p.Namespace}, job)
          if err != nil {
             fmt.Printf("ERROR: %v occured while retrieving job: %s", err, jobName)
             return
@@ -735,24 +874,51 @@ func checkJob(clusterName, namespace string, data, labels map[string]string, cli
             fmt.Printf("\n Job succeeded, KUD successfully installed in Cluster %s\n", clusterName)
 
             //KUD was installed successfully create configmap to store IP address info for the cluster
-            err = createConfigMap(data, labels, namespace, clientset)
-            if err != nil {
-               fmt.Printf("Error occured while creating Ip address configmap for cluster %v\n ERROR: %v", clusterName, err)
+            labels := p.Labels
+            labels["provisioning-version"] = p.ResourceVersion
+            cmName := labels["cluster"] + "-configmap"
+            foundConfig := &corev1.ConfigMap{}
+            err := r.client.Get(context.TODO(), types.NamespacedName{Name: cmName, Namespace: p.Namespace}, foundConfig)
+
+            // Check if provisioning CR was updated
+            if err == nil && foundConfig.Labels["provisioning-version"] != labels["provisioning-version"] {
+
+               foundConfig.Data = data
+               foundConfig.Labels =  labels
+               err = r.client.Update(context.TODO(), foundConfig)
+               if err != nil {
+                  fmt.Printf("Error occured while updating IP address configmap for provisioningCR %s\n ERROR: %v\n", p.Name, err)
+                  return
+                }
+
+            } else if err != nil && errors.IsNotFound(err) {
+               err = r.createConfigMap(p, data, labels, cmName)
+               if err != nil {
+                  fmt.Printf("Error occured while creating IP address configmap for cluster %v\n ERROR: %v", clusterName, err)
+                  return
+               }
                return
-            }
-            return
+
+            } else if err != nil {
+              fmt.Printf("ERROR occured while checking if IP address configmap %v already exists: %v\n", cmName, err)
+              return
+              }
+
+
+           return
          }
 
         if jobFailed == 1 {
            fmt.Printf("\n Job Failed, KUD not installed in Cluster %s, check pod logs\n", clusterName)
+
            return
         }
 
+        time.Sleep(5 * time.Second)
      }
     return
 
 }
-
 //Function to get software list from software CR
 func getSoftwareList(softwareCR *bpav1alpha1.Software) (string, []interface{}, []interface{}) {
 
@@ -844,11 +1010,14 @@ func sshInstaller(softwareString, sshPrivateKey, ipAddress string) error {
 
 }
 
-func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
+
+// List virtlet VMs
+func listVirtletVMs(vmClient client.Client) ([]VirtletVM, error) {
 
         var vmPodList []VirtletVM
 
-        pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
+        pods := &corev1.PodList{}
+        err := vmClient.List(context.TODO(), &client.ListOptions{}, pods)
         if err != nil {
                 fmt.Printf("Could not get pod info, Error: %v\n", err)
                 return []VirtletVM{}, err
index 9ec0aa5..6c9e330 100644 (file)
@@ -2,12 +2,14 @@ package provisioning
 
 import (
 
+       "context"
        "testing"
        "io/ioutil"
        "os"
 
        bpav1alpha1 "github.com/bpa-operator/pkg/apis/bpa/v1alpha1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+       batchv1 "k8s.io/api/batch/v1"
        logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/types"
@@ -16,7 +18,6 @@ import (
        "sigs.k8s.io/controller-runtime/pkg/client/fake"
        "sigs.k8s.io/controller-runtime/pkg/reconcile"
        fakedynamic "k8s.io/client-go/dynamic/fake"
-       fakeclientset "k8s.io/client-go/kubernetes/fake"
 )
 
 func TestProvisioningController(t *testing.T) {
@@ -55,12 +56,11 @@ func TestProvisioningController(t *testing.T) {
 
     sc.AddKnownTypes(bpav1alpha1.SchemeGroupVersion, provisioning, provisioning2, provisioning3)
 
-    // Create Fake Clients and Clientset
+    // Create Fake Clients
     fakeClient := fake.NewFakeClient(objs...)
     fakeDyn := fakedynamic.NewSimpleDynamicClient(sc, bmhList,)
-    fakeClientSet := fakeclientset.NewSimpleClientset()
 
-    r := &ReconcileProvisioning{client: fakeClient, scheme: sc, clientset: fakeClientSet, bmhClient: fakeDyn}
+    r := &ReconcileProvisioning{client: fakeClient, scheme: sc, bmhClient: fakeDyn}
 
     // Mock request to simulate Reconcile() being called on an event for a watched resource 
     req := simulateRequest(provisioning)
@@ -70,9 +70,8 @@ func TestProvisioningController(t *testing.T) {
     }
 
    // Test 1: Check the job was created with the expected name
-   jobClient := r.clientset.BatchV1().Jobs(namespace)
-   job, err := jobClient.Get("kud-test-cluster", metav1.GetOptions{})
-
+    job := &batchv1.Job{}
+    err = r.client.Get(context.TODO(), types.NamespacedName{Name: "kud-test-cluster", Namespace: namespace}, job)
     if err != nil {
         t.Fatalf("Error occured while getting job: (%v)", err)
     }
index 282641d..088c10d 100644 (file)
@@ -334,10 +334,10 @@ k8s.io/apimachinery/pkg/apis/meta/v1
 k8s.io/apimachinery/pkg/runtime/schema
 k8s.io/apimachinery/pkg/api/errors
 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured
+k8s.io/apimachinery/pkg/types
 k8s.io/apimachinery/pkg/runtime/serializer
 k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/pkg/util/wait
-k8s.io/apimachinery/pkg/types
 k8s.io/apimachinery/pkg/api/meta
 k8s.io/apimachinery/pkg/util/runtime
 k8s.io/apimachinery/pkg/api/resource
@@ -375,8 +375,8 @@ k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/client-go/plugin/pkg/client/auth
 k8s.io/client-go/rest
 k8s.io/client-go/dynamic
-k8s.io/client-go/kubernetes
 k8s.io/client-go/discovery
+k8s.io/client-go/kubernetes
 k8s.io/client-go/kubernetes/scheme
 k8s.io/client-go/tools/cache
 k8s.io/client-go/restmapper
@@ -396,6 +396,9 @@ k8s.io/client-go/tools/clientcmd
 k8s.io/client-go/tools/leaderelection
 k8s.io/client-go/tools/leaderelection/resourcelock
 k8s.io/client-go/tools/record
+k8s.io/client-go/util/workqueue
+k8s.io/client-go/dynamic/fake
+k8s.io/client-go/kubernetes/fake
 k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1
 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1
 k8s.io/client-go/kubernetes/typed/apps/v1
@@ -428,9 +431,6 @@ k8s.io/client-go/kubernetes/typed/settings/v1alpha1
 k8s.io/client-go/kubernetes/typed/storage/v1
 k8s.io/client-go/kubernetes/typed/storage/v1alpha1
 k8s.io/client-go/kubernetes/typed/storage/v1beta1
-k8s.io/client-go/util/workqueue
-k8s.io/client-go/dynamic/fake
-k8s.io/client-go/kubernetes/fake
 k8s.io/client-go/tools/pager
 k8s.io/client-go/util/buffer
 k8s.io/client-go/util/retry
@@ -503,6 +503,7 @@ sigs.k8s.io/controller-runtime/pkg/runtime/signals
 sigs.k8s.io/controller-runtime/pkg/runtime/scheme
 sigs.k8s.io/controller-runtime/pkg/client
 sigs.k8s.io/controller-runtime/pkg/controller
+sigs.k8s.io/controller-runtime/pkg/controller/controllerutil
 sigs.k8s.io/controller-runtime/pkg/handler
 sigs.k8s.io/controller-runtime/pkg/reconcile
 sigs.k8s.io/controller-runtime/pkg/source
diff --git a/cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go b/cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/controllerutil.go
new file mode 100644 (file)
index 0000000..d918eea
--- /dev/null
@@ -0,0 +1,178 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllerutil
+
+import (
+       "context"
+       "fmt"
+       "reflect"
+
+       "k8s.io/apimachinery/pkg/api/errors"
+       "k8s.io/apimachinery/pkg/apis/meta/v1"
+       "k8s.io/apimachinery/pkg/runtime"
+       "k8s.io/apimachinery/pkg/runtime/schema"
+       "sigs.k8s.io/controller-runtime/pkg/client"
+       "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// AlreadyOwnedError is an error returned if the object you are trying to assign
+// a controller reference to is already owned by another controller. Object is the
+// subject and Owner is the reference for the current owner.
+type AlreadyOwnedError struct {
+       Object v1.Object
+       Owner  v1.OwnerReference
+}
+
+func (e *AlreadyOwnedError) Error() string {
+       return fmt.Sprintf("Object %s/%s is already owned by another %s controller %s", e.Object.GetNamespace(), e.Object.GetName(), e.Owner.Kind, e.Owner.Name)
+}
+
+func newAlreadyOwnedError(Object v1.Object, Owner v1.OwnerReference) *AlreadyOwnedError {
+       return &AlreadyOwnedError{
+               Object: Object,
+               Owner:  Owner,
+       }
+}
+
+// SetControllerReference sets owner as a Controller OwnerReference on owned.
+// This is used for garbage collection of the owned object and for
+// reconciling the owner object on changes to owned (with a Watch + EnqueueRequestForOwner).
+// Since only one OwnerReference can be a controller, it returns an error if
+// there is another OwnerReference with Controller flag set.
+func SetControllerReference(owner, object v1.Object, scheme *runtime.Scheme) error {
+       ro, ok := owner.(runtime.Object)
+       if !ok {
+               return fmt.Errorf("is not a %T a runtime.Object, cannot call SetControllerReference", owner)
+       }
+
+       gvk, err := apiutil.GVKForObject(ro, scheme)
+       if err != nil {
+               return err
+       }
+
+       // Create a new ref
+       ref := *v1.NewControllerRef(owner, schema.GroupVersionKind{Group: gvk.Group, Version: gvk.Version, Kind: gvk.Kind})
+
+       existingRefs := object.GetOwnerReferences()
+       fi := -1
+       for i, r := range existingRefs {
+               if referSameObject(ref, r) {
+                       fi = i
+               } else if r.Controller != nil && *r.Controller {
+                       return newAlreadyOwnedError(object, r)
+               }
+       }
+       if fi == -1 {
+               existingRefs = append(existingRefs, ref)
+       } else {
+               existingRefs[fi] = ref
+       }
+
+       // Update owner references
+       object.SetOwnerReferences(existingRefs)
+       return nil
+}
+
+// Returns true if a and b point to the same object
+func referSameObject(a, b v1.OwnerReference) bool {
+       aGV, err := schema.ParseGroupVersion(a.APIVersion)
+       if err != nil {
+               return false
+       }
+
+       bGV, err := schema.ParseGroupVersion(b.APIVersion)
+       if err != nil {
+               return false
+       }
+
+       return aGV == bGV && a.Kind == b.Kind && a.Name == b.Name
+}
+
+// OperationResult is the action result of a CreateOrUpdate call
+type OperationResult string
+
+const ( // They should complete the sentence "Deployment default/foo has been ..."
+       // OperationResultNone means that the resource has not been changed
+       OperationResultNone OperationResult = "unchanged"
+       // OperationResultCreated means that a new resource is created
+       OperationResultCreated OperationResult = "created"
+       // OperationResultUpdated means that an existing resource is updated
+       OperationResultUpdated OperationResult = "updated"
+)
+
+// CreateOrUpdate creates or updates the given object obj in the Kubernetes
+// cluster. The object's desired state should be reconciled with the existing
+// state using the passed in MutateFn. obj must be a struct pointer so that
+// obj can be updated with the content returned by the Server.
+//
+// It returns the executed operation and an error.
+func CreateOrUpdate(ctx context.Context, c client.Client, obj runtime.Object, f MutateFn) (OperationResult, error) {
+       // op is the operation we are going to attempt
+       op := OperationResultNone
+
+       // get the existing object meta
+       metaObj, ok := obj.(v1.Object)
+       if !ok {
+               return OperationResultNone, fmt.Errorf("%T does not implement metav1.Object interface", obj)
+       }
+
+       // retrieve the existing object
+       key := client.ObjectKey{
+               Name:      metaObj.GetName(),
+               Namespace: metaObj.GetNamespace(),
+       }
+       err := c.Get(ctx, key, obj)
+
+       // reconcile the existing object
+       existing := obj.DeepCopyObject()
+       existingObjMeta := existing.(v1.Object)
+       existingObjMeta.SetName(metaObj.GetName())
+       existingObjMeta.SetNamespace(metaObj.GetNamespace())
+
+       if e := f(obj); e != nil {
+               return OperationResultNone, e
+       }
+
+       if metaObj.GetName() != existingObjMeta.GetName() {
+               return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects name")
+       }
+
+       if metaObj.GetNamespace() != existingObjMeta.GetNamespace() {
+               return OperationResultNone, fmt.Errorf("ReconcileFn cannot mutate objects namespace")
+       }
+
+       if errors.IsNotFound(err) {
+               err = c.Create(ctx, obj)
+               op = OperationResultCreated
+       } else if err == nil {
+               if reflect.DeepEqual(existing, obj) {
+                       return OperationResultNone, nil
+               }
+               err = c.Update(ctx, obj)
+               op = OperationResultUpdated
+       } else {
+               return OperationResultNone, err
+       }
+
+       if err != nil {
+               op = OperationResultNone
+       }
+       return op, err
+}
+
+// MutateFn is a function which mutates the existing object into its desired state.
+type MutateFn func(existing runtime.Object) error
diff --git a/cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go b/cmd/bpa-operator/vendor/sigs.k8s.io/controller-runtime/pkg/controller/controllerutil/doc.go
new file mode 100644 (file)
index 0000000..ab386b2
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package controllerutil contains utility functions for working with and implementing Controllers.
+*/
+package controllerutil