Add e2e script for Virtlet VM provisioning 65/1865/33
author: Ramamani Yeleswarapu <ramamani.yeleswarapu@intel.com>
Mon, 28 Oct 2019 22:07:48 +0000 (15:07 -0700)
committer: Ramamani Yeleswarapu <ramamani.yeleswarapu@intel.com>
Sat, 23 Nov 2019 03:33:41 +0000 (19:33 -0800)
- Adds e2e script, provisioning cr yaml for testing
  Virtlet VM provisioning.
- Modifies Makefiles.
- Modifies kud_bm_launch.sh.

Signed-off-by: Ramamani Yeleswarapu <ramamani.yeleswarapu@intel.com>
Change-Id: I6a9714b3a5c2b7a71566ce854a7b8d52e637bb17

Makefile
cmd/bpa-operator/Makefile
cmd/bpa-operator/deploy/netattachdef-flannel-vm.yaml
cmd/bpa-operator/deploy/virtlet-deployment-sample.yaml
cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh [new file with mode: 0755]
cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml [new file with mode: 0644]
cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go
deploy/kud/kud_bm_launch.sh

index 3fca41b..22bde16 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -68,6 +68,9 @@ kud_bm_deploy:
 kud_bm_deploy_e2e:
        pushd $(KUD_PATH) && ./kud_bm_launch.sh bm && popd
 
+kud_vm_deploy:
+       pushd $(KUD_PATH) && ./kud_bm_launch.sh vm && popd
+
 kud_bm_reset:
        pushd $(KUD_PATH) && ./kud_bm_launch.sh reset && popd
 
@@ -95,6 +98,9 @@ bpa_op_e2e_vm:
 bpa_op_e2e_bmh:
        pushd $(BPA_OPERATOR) && make e2etest_bmh && popd
 
+bpa_op_e2e_virtletvm:
+       pushd $(BPA_OPERATOR) && make e2etest_virtletvm && popd
+
 bpa_op_unit:
        pushd $(BPA_OPERATOR) && make unit_test && popd
 
@@ -102,6 +108,8 @@ bpa_op_vm_verifier: bpa_op_install bpa_op_e2e_vm
 
 bpa_op_bmh_verifier: bpa_op_install_bmh_e2e bpa_op_e2e_bmh
 
+bpa_op_virtletvm_verifier: bpa_op_install bpa_op_e2e_virtletvm
+
 bpa_op_all: bm_all bpa_op_install
 
 bpa_rest_api_install:
@@ -144,8 +152,9 @@ verify_all: prerequisite \
 verifier: verify_all
 
 verify_nestedk8s: prerequisite \
-       kud_bm_deploy \
-       sdwan_verifier
+       kud_vm_deploy \
+       sdwan_verifier \
+       bpa_op_virtletvm_verifier
 
 bm_verify_nestedk8s: prerequisite \
         kud_bm_deploy_e2e \
index 0886aea..07f0735 100644 (file)
@@ -63,3 +63,7 @@ e2etest_vm:
 .PHONY: e2etest_bmh
 e2etest_bmh:
        ./e2etest/bpa_bmh_verifier.sh
+
+.PHONY: e2etest_virtletvm
+e2etest_virtletvm:
+       cd e2etest && ./bpa_virtletvm_verifier.sh
index 4cdf089..b2ff402 100644 (file)
@@ -5,7 +5,7 @@ metadata:
 spec:
   config: '{
             "cniVersion": "0.3.1",
-            "name" : "flannel-vm",
+            "name" : "cni0",
             "plugins": [ {
               "type": "flannel",
               "cniVersion": "0.3.1",
index cbd0122..6a5b925 100644 (file)
@@ -22,6 +22,9 @@ spec:
           ssh_pwauth: True
           disable_root: false
           chpasswd: {expire: False}
+          manage_resolv_conf: True
+          resolv_conf:
+            nameservers: ['8.8.8.8', '8.8.4.4']
           users:
           - name: root
             gecos: User
@@ -31,13 +34,16 @@ spec:
             shell: /bin/bash
             sudo: ALL=(ALL) NOPASSWD:ALL
             ssh_authorized_keys:
-            # SSH key goes here
+              $ssh_key
           runcmd:
-            - echo hello world
+            - sed -i -e 's/^#DNS=.*/DNS=8.8.8.8/g' /etc/systemd/resolved.conf
+            - systemctl daemon-reload
+            - systemctl restart systemd-resolved
         v1.multus-cni.io/default-network: '[
             { "name": "flannel-vm",
               "mac": "c2:b4:57:49:47:f1" }]'
-        VirtletRootVolumeSize: 12Gi
+        VirtletRootVolumeSize: 8Gi
+        VirtletVCPUCount: "2"
     spec:
       affinity:
         nodeAffinity:
@@ -61,8 +67,8 @@ spec:
         resources:
           requests:
             cpu: 2
-            memory: 12Gi
+            memory: 8Gi
           limits:
             # This memory limit is applied to the libvirt domain definition
             cpu: 2
-            memory: 12Gi
+            memory: 8Gi
diff --git a/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh
new file mode 100755 (executable)
index 0000000..434f49a
--- /dev/null
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+printf "\n\nStart Virtlet VM provisioning E2E test\n\n"
+
+TUNING_DIR="/tmp/tuning_dir"
+CNI_PLUGINS="cni-plugins-linux-amd64-v0.8.2.tgz"
+if !(wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/$CNI_PLUGINS -P $TUNING_DIR 2>/dev/null); then
+    echo "Error downloading cni plugins for Virtlet VM provisioning"
+    exit 1
+fi
+
+pushd $TUNING_DIR
+if [ -f $CNI_PLUGINS ]; then
+    tar -xzvf $CNI_PLUGINS > /dev/null
+    if [ -f "tuning" ]; then
+       cp "tuning" "/opt/cni/bin/"
+       echo "Updated the tuning plugin"
+    else
+       echo "Error finding the latest tuning plugin"
+       rm -rf $TUNING_DIR
+       exit 1
+    fi
+    rm -rf $TUNING_DIR
+fi
+popd
+
+# create flannel-vm net-attach-def
+kubectl apply -f ../deploy/netattachdef-flannel-vm.yaml -n kube-system
+
+# generate user ssh key
+if [ ! -f "/root/.ssh/id_rsa.pub" ]; then
+    ssh-keygen -f /root/.ssh/id_rsa -P ""
+fi
+
+# create virtlet vm
+key=$(cat /root/.ssh/id_rsa.pub)
+cp ../deploy/virtlet-deployment-sample.yaml virtlet_test_vm.yaml
+sed -i "s|\$ssh_key|${key}|" virtlet_test_vm.yaml
+kubectl create -f virtlet_test_vm.yaml
+
+status=""
+while [[ $status != "Running" ]]
+do
+       stats=$(kubectl get pods |grep -i virtlet-deployment)
+       status=$(echo $stats | cut -d " " -f 3); sleep 5
+       if [[ $status == "Err"* ]]; then
+               echo "Error creating Virtlet VM, test incomplete"
+               kubectl delete -f virtlet_test_vm.yaml
+               exit 1
+       fi
+done
+
+sleep 3
+echo "Virtlet VM is ready for provisioning"
+
+printf "\nkubectl get pods $(kubectl get pods |grep -i virtlet-deployment | awk '{print $1}') -o json\n"
+podjson=$(kubectl get pods $(kubectl get pods |grep -i virtlet-deployment | awk '{print $1}') -o json)
+printf "\n$podjson\n\n"
+
+# create provisioning cr
+kubectl apply -f e2e_virtletvm_test_provisioning_cr.yaml
+
+sleep 2m
+
+status="Running"
+
+while [[ $status == "Running" ]]
+do
+       stats=$(kubectl get pods |grep -i kud-cluster-vm)
+       status=$(echo $stats | cut -d " " -f 3)
+       echo "KUD install job still running"
+       sleep 2m
+done
+
+if [[ $status == "Completed" ]]; then
+   printf "KUD Install completed successfully\n"
+else
+   printf "KUD Install failed\n"
+fi
+
+printf "\nPrinting kud-cluster-vm job logs....\n\n"
+kudjob=$(kubectl get pods | grep -i kud-cluster-vm | awk '{print $1}')
+printf "$(kubectl logs $kudjob)\n"
+
+printf "\n\nBeginning E2E VM Test Teardown\n\n"
+
+kubectl delete -f e2e_virtletvm_test_provisioning_cr.yaml
+kubectl delete job kud-cluster-vm
+kubectl delete configmap cluster-vm-configmap
+kubectl delete -f virtlet_test_vm.yaml
diff --git a/cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml b/cmd/bpa-operator/e2etest/e2e_virtletvm_test_provisioning_cr.yaml
new file mode 100644 (file)
index 0000000..4868b72
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: test-vm
+  labels:
+    cluster: cluster-vm
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: c2:b4:57:49:47:f1
index 5b5cc41..b4e9577 100644 (file)
@@ -257,6 +257,9 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                       }
 
                        allString += masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                       if clusterType == "virtlet-vm" {
+                           allString = masterLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                       }
                        masterString += masterLabel + "\n"
                        clusterData[masterTag + masterLabel] = hostIPaddress
 
@@ -337,8 +340,10 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile.
                                           }
                                            fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
 
-
                                            allString += workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + "\n"
+                                           if clusterType == "virtlet-vm" {
+                                               allString = workerLabel + "  ansible_ssh_host="  + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+                                           }
                                            workerString += workerLabel + "\n"
                                           clusterData[workerTag + workerLabel] = hostIPaddress
 
@@ -867,8 +872,8 @@ func listVirtletVMs(clientset kubernetes.Interface) ([]VirtletVM, error) {
                         podStatusJson, _ := json.Marshal(pod.Status)
                         json.Unmarshal([]byte(podStatusJson), &podStatus)
 
-                        if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
-                                ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
+                        if runtime  == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
+                                ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
                                 json.Unmarshal([]byte(ns), &podDefaultNetStatus)
 
                                 vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
index c19b0bd..4122422 100755 (executable)
@@ -83,9 +83,12 @@ EOL
 
 function kud_install {
     pushd $DOWNLOAD_PATH/multicloud-k8s/kud/hosting_providers/vagrant/
-    if [ "$1" == "all" ]; then
+    if [ "$1" == "all" ] || [ "$1" == "vm" ]; then
         sed -i -e 's/testing_enabled=${KUD_ENABLE_TESTS:-false}/testing_enabled=${KUD_ENABLE_TESTS:-true}/g' installer.sh
     fi
+    if [ "$1" == "vm" ]; then
+        sed -i -e 's/^kube_pods_subnet.*/kube_pods_subnet: 172.21.64.0\/18/g' inventory/group_vars/k8s-cluster.yml
+    fi
     ./installer.sh | tee kud_deploy.log
 
     if [ "$1" == "bm" ]; then