kud_bm_deploy_e2e:
pushd $(KUD_PATH) && ./kud_bm_launch.sh bm && popd
+kud_vm_deploy:
+ pushd $(KUD_PATH) && ./kud_bm_launch.sh vm && popd
+
kud_bm_reset:
pushd $(KUD_PATH) && ./kud_bm_launch.sh reset && popd
bpa_op_e2e_bmh:
pushd $(BPA_OPERATOR) && make e2etest_bmh && popd
+bpa_op_e2e_virtletvm:
+ pushd $(BPA_OPERATOR) && make e2etest_virtletvm && popd
+
bpa_op_unit:
pushd $(BPA_OPERATOR) && make unit_test && popd
bpa_op_bmh_verifier: bpa_op_install_bmh_e2e bpa_op_e2e_bmh
+bpa_op_virtletvm_verifier: bpa_op_install bpa_op_e2e_virtletvm
+
bpa_op_all: bm_all bpa_op_install
bpa_rest_api_install:
verifier: verify_all
verify_nestedk8s: prerequisite \
- kud_bm_deploy \
- sdwan_verifier
+ kud_vm_deploy \
+ sdwan_verifier \
+ bpa_op_virtletvm_verifier
bm_verify_nestedk8s: prerequisite \
kud_bm_deploy_e2e \
.PHONY: e2etest_bmh
e2etest_bmh:
	./e2etest/bpa_bmh_verifier.sh
+
+# Runs from inside e2etest/ (unlike e2etest_bmh) because the virtlet verifier
+# script resolves the deploy/ samples and its provisioning CR through paths
+# relative to that directory.
+.PHONY: e2etest_virtletvm
+e2etest_virtletvm:
+	cd e2etest && ./bpa_virtletvm_verifier.sh
spec:
config: '{
"cniVersion": "0.3.1",
- "name" : "flannel-vm",
+ "name" : "cni0",
"plugins": [ {
"type": "flannel",
"cniVersion": "0.3.1",
ssh_pwauth: True
disable_root: false
chpasswd: {expire: False}
+ manage_resolv_conf: True
+ resolv_conf:
+ nameservers: ['8.8.8.8', '8.8.4.4']
users:
- name: root
gecos: User
shell: /bin/bash
sudo: ALL=(ALL) NOPASSWD:ALL
ssh_authorized_keys:
- # SSH key goes here
+ $ssh_key
runcmd:
- - echo hello world
+ - sed -i -e 's/^#DNS=.*/DNS=8.8.8.8/g' /etc/systemd/resolved.conf
+ - systemctl daemon-reload
+ - systemctl restart systemd-resolved
v1.multus-cni.io/default-network: '[
{ "name": "flannel-vm",
"mac": "c2:b4:57:49:47:f1" }]'
- VirtletRootVolumeSize: 12Gi
+ VirtletRootVolumeSize: 8Gi
+ VirtletVCPUCount: "2"
spec:
affinity:
nodeAffinity:
resources:
requests:
cpu: 2
- memory: 12Gi
+ memory: 8Gi
limits:
# This memory limit is applied to the libvirt domain definition
cpu: 2
- memory: 12Gi
+ memory: 8Gi
--- /dev/null
+#!/bin/bash
+
+printf "\n\nStart Virtlet VM provisioning E2E test\n\n"
+
+TUNING_DIR="/tmp/tuning_dir"
+CNI_PLUGINS="cni-plugins-linux-amd64-v0.8.2.tgz"
+if !(wget https://github.com/containernetworking/plugins/releases/download/v0.8.2/$CNI_PLUGINS -P $TUNING_DIR 2>/dev/null); then
+ echo "Error downloading cni plugins for Virtlet VM provisioning"
+ exit 1
+fi
+
+pushd $TUNING_DIR
+if [ -f $CNI_PLUGINS ]; then
+ tar -xzvf $CNI_PLUGINS > /dev/null
+ if [ -f "tuning" ]; then
+ cp "tuning" "/opt/cni/bin/"
+ echo "Updated the tuning plugin"
+ else
+ echo "Error finding the latest tuning plugin"
+ rm -rf $TUNING_DIR
+ exit 1
+ fi
+ rm -rf $TUNING_DIR
+fi
+popd
+
+# create flannel-vm net-attach-def
+kubectl apply -f ../deploy/netattachdef-flannel-vm.yaml -n kube-system
+
+# generate user ssh key
+if [ ! -f "/root/.ssh/id_rsa.pub" ]; then
+ ssh-keygen -f /root/.ssh/id_rsa -P ""
+fi
+
+# create virtlet vm
+key=$(cat /root/.ssh/id_rsa.pub)
+cp ../deploy/virtlet-deployment-sample.yaml virtlet_test_vm.yaml
+sed -i "s|\$ssh_key|${key}|" virtlet_test_vm.yaml
+kubectl create -f virtlet_test_vm.yaml
+
+status=""
+while [[ $status != "Running" ]]
+do
+ stats=$(kubectl get pods |grep -i virtlet-deployment)
+ status=$(echo $stats | cut -d " " -f 3)
+ if [[ $status == "Err"* ]]; then
+ echo "Error creating Virtlet VM, test incomplete"
+ kubectl delete -f virtlet_test_vm.yaml
+ exit 1
+ fi
+done
+
+sleep 3
+echo "Virtlet VM is ready for provisioning"
+
+printf "\nkubectl get pods $(kubectl get pods |grep -i virtlet-deployment | awk '{print $1}') -o json\n"
+podjson=$(kubectl get pods $(kubectl get pods |grep -i virtlet-deployment | awk '{print $1}') -o json)
+printf "\n$podjson\n\n"
+
+# create provisioning cr
+kubectl apply -f e2e_virtletvm_test_provisioning_cr.yaml
+
+sleep 2m
+
+status="Running"
+
+while [[ $status == "Running" ]]
+do
+ stats=$(kubectl get pods |grep -i kud-cluster-vm)
+ status=$(echo $stats | cut -d " " -f 3)
+ echo "KUD install job still running"
+ sleep 2m
+done
+
+if [[ $status == "Completed" ]]; then
+ printf "KUD Install completed successfully\n"
+else
+ printf "KUD Install failed\n"
+fi
+
+printf "\nPrinting kud-cluster-vm job logs....\n\n"
+kudjob=$(kubectl get pods | grep -i kud-cluster-vm | awk '{print $1}')
+printf "$(kubectl logs $kudjob)\n"
+
+printf "\n\nBeginning E2E VM Test Teardown\n\n"
+
+kubectl delete -f e2e_virtletvm_test_provisioning_cr.yaml
+kubectl delete job kud-cluster-vm
+kubectl delete configmap cluster-vm-configmap
+kubectl delete -f virtlet_test_vm.yaml
--- /dev/null
+# Provisioning CR applied by the Virtlet VM E2E test: asks the BPA operator
+# to install KUD on a single-master cluster ("cluster-vm") of type virtlet-vm.
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: test-vm
+  labels:
+    cluster: cluster-vm
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        # MAC of the VM's default network interface — matches the multus
+        # default-network annotation in the virtlet deployment sample.
+        mac-address: c2:b4:57:49:47:f1
}
allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
+ if clusterType == "virtlet-vm" {
+ allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+ }
masterString += masterLabel + "\n"
clusterData[masterTag + masterLabel] = hostIPaddress
}
fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
-
allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
+ if clusterType == "virtlet-vm" {
+ allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+ }
workerString += workerLabel + "\n"
clusterData[workerTag + workerLabel] = hostIPaddress
podStatusJson, _ := json.Marshal(pod.Status)
json.Unmarshal([]byte(podStatusJson), &podStatus)
- if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["v1.multus-cni.io/default-network"] != nil {
- ns := podAnnotation["v1.multus-cni.io/default-network"].(string)
+ if runtime == "virtlet.cloud" && podStatus.Phase == "Running" && podAnnotation["k8s.v1.cni.cncf.io/networks-status"] != nil {
+ ns := podAnnotation["k8s.v1.cni.cncf.io/networks-status"].(string)
json.Unmarshal([]byte(ns), &podDefaultNetStatus)
vmPodList = append(vmPodList, VirtletVM{podStatus.PodIP, podDefaultNetStatus[0].Mac})
function kud_install {
pushd $DOWNLOAD_PATH/multicloud-k8s/kud/hosting_providers/vagrant/
-    if [ "$1" == "all" ]; then
+    # `[` has no `||` operator; `[ a || b ]` aborts the first test with
+    # "missing ]" and then tries to run `b ]` as a command, so neither the
+    # "all" nor the "vm" case would enable testing. Combine two test
+    # commands at the shell level instead.
+    if [ "$1" == "all" ] || [ "$1" == "vm" ]; then
         sed -i -e 's/testing_enabled=${KUD_ENABLE_TESTS:-false}/testing_enabled=${KUD_ENABLE_TESTS:-true}/g' installer.sh
     fi
+    # Nested (VM) deployments need a pod subnet that cannot collide with the
+    # outer cluster's pod network.
+    if [ "$1" == "vm" ]; then
+        sed -i -e 's/^kube_pods_subnet.*/kube_pods_subnet: 172.21.64.0\/18/g' inventory/group_vars/k8s-cluster.yml
+    fi
./installer.sh | tee kud_deploy.log
if [ "$1" == "bm" ]; then