--- /dev/null
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+ name: bpa-remote
+ labels:
+ cluster: bpa-remote
+ owner: c1
+spec:
+ masters:
+ - remote-compute:
+ mac-address: 08:00:27:ef:ab:60
+ KUDPlugins:
+ - bpa
+ PodSubnet: 10.244.64.0/18
--- /dev/null
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+ name: remote-vvm110
+ labels:
+ cluster: remote-vvm110
+ cluster-type: virtlet-vm
+ owner: c1
+spec:
+ masters:
+ - master-1:
+ mac-address: c2:b4:57:49:47:f1
+ PodSubnet: 172.21.64.0/18
--- /dev/null
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+ name: remote-vvm115
+ labels:
+ cluster: remote-vvm115
+ cluster-type: virtlet-vm
+ owner: c1
+spec:
+ masters:
+ - master-1:
+ mac-address: c2:b4:57:49:47:f3
+ - master-2:
+ mac-address: c2:b4:57:49:47:f4
+ - master-3:
+ mac-address: c2:b4:57:49:47:f5
+ workers:
+ - worker-1:
+ mac-address: c2:b4:57:49:47:f6
+ - worker-2:
+ mac-address: c2:b4:57:49:47:f7
+ PodSubnet: 172.21.64.0/18
--- /dev/null
+#!/bin/bash
+
+printf "\n\nStart Remote Virtlet VM provisioning E2E test\n\n"
+
+# remote compute provisioned and kube config available
+source ~/ICN/latest/icn/env/lib/common.sh
+CLUSTER_NAME=bpa-remote
+KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
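+# call_api is provided by the sourced common.sh; a non-zero return means the remote
+# cluster's API server did not answer with the default service-account token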
+call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
+ret=$?
+if [[ $ret != 0 ]];
+then
+ printf "\nRemote Kubernetes Cluster Install did not complete successfully\n"
+ exit 1
+else
+ printf "\nRemote Kubernetes Cluster Install was successful\n"
+fi
+
+# create virtlet VM in remote compute
+printf "Create remote Virtlet VM ...\n"
+key=$(cat /opt/kud/multi-cluster/.ssh/id_rsa.pub)
+cp ../deploy/virtlet-deployment-sample.yaml bpa_remote_virtletvm.yaml
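+# replace the literal $ssh_key placeholder in the copied sample with this host's public key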
+sed -i "s|\$ssh_key|${key}|" bpa_remote_virtletvm.yaml
+kubectl ${KUBECONFIG} create -f bpa_remote_virtletvm.yaml
+
+status=""
+# poll until the virtlet VM pod reports Running; abort the test on an error state
+while [[ $status != "Running" ]]
+do
+ sleep 30
+ stats=$(kubectl ${KUBECONFIG} get pods | grep -i virtlet-deployment)
+ status=$(echo $stats | cut -d " " -f 3)
+ if [[ $status == "Err"* ]]; then
+ echo "Error creating remote Virtlet VM, test incomplete"
+ kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml
+ exit 1
+ fi
+done
+
+echo "Remote Virtlet VM is ready for provisioning"
+
+printf "\nkubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json\n"
+podjson=$(kubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json)
+printf "\n$podjson\n\n"
+
+printf "Provision remote Virtlet VM ...\n"
+kubectl ${KUBECONFIG} apply -f bpa_remote_virtletvm_cr.yaml
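+# the BPA operator reconciles this CR and launches a kud-* installer job; its status is polled below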
+
+#Check Status of remote kud job pod
+status="Running"
+
+while [[ $status == "Running" ]]
+do
+ echo "KUD install job still running"
+ sleep 2m
+ stats=$(kubectl ${KUBECONFIG} get pods |grep -i kud-)
+ status=$(echo $stats | cut -d " " -f 3)
+done
+
+if [[ $status == "Completed" ]];
+then
+ printf "KUD Install Job completed\n"
+ printf "Checking cluster status\n"
+else
+ printf "KUD Install Job failed\n"
+fi
+
+#Print logs of Job Pod
+jobPod=$(kubectl ${KUBECONFIG} get pods|grep kud-)
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow Printing Job pod logs\n"
+kubectl ${KUBECONFIG} logs $podName
+
+printf "\n\nBeginning E2E Remote VM Test Teardown\n\n"
+
+kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm_cr.yaml
+kubectl ${KUBECONFIG} delete job kud-remotevvm
+kubectl ${KUBECONFIG} delete configmap remotevvm-configmap
+kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml
fi
popd
+# Create network attachment definition
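+# the flannel-vm net-attach-def is referenced by the VM's Multus default-network
+# annotation below, and its fixed MAC is what the provisioning CR matches on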
+BPA_DIR="/tmp/bpa"
+mkdir -p $BPA_DIR
+cat <<EOF > $BPA_DIR/netattachdef-flannel-vm.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+ name: flannel-vm
+spec:
+ config: '{
+ "cniVersion": "0.3.1",
+ "name" : "cni0",
+ "plugins": [ {
+ "type": "flannel",
+ "cniVersion": "0.3.1",
+ "masterplugin": true,
+ "delegate": {
+ "isDefaultGateway": true
+ }
+ },
+ {
+ "type": "tuning"
+ }]
+ }'
+EOF
+
+cat <<EOF > $BPA_DIR/virtlet_test_vm.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: virtlet-deployment
+ labels:
+ app: virtlet
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: virtlet
+ template:
+ metadata:
+ labels:
+ app: virtlet
+ annotations:
+ VirtletLibvirtCPUSetting: |
+ mode: host-passthrough
+ # This tells CRI Proxy that this pod belongs to Virtlet runtime
+ kubernetes.io/target-runtime: virtlet.cloud
+ VirtletCloudInitUserData: |
+ ssh_pwauth: True
+ disable_root: false
+ chpasswd: {expire: False}
+ manage_resolv_conf: True
+ resolv_conf:
+ nameservers: ['8.8.8.8', '8.8.4.4']
+ users:
+ - name: root
+ gecos: User
+ primary-group: root
+ groups: users
+ lock_passwd: false
+ shell: /bin/bash
+ sudo: ALL=(ALL) NOPASSWD:ALL
+ ssh_authorized_keys:
+ \$ssh_key
+ runcmd:
+ - sed -i -e 's/^#DNS=.*/DNS=8.8.8.8/g' /etc/systemd/resolved.conf
+ - systemctl daemon-reload
+ - systemctl restart systemd-resolved
+ v1.multus-cni.io/default-network: '[
+ { "name": "flannel-vm",
+ "mac": "c2:b4:57:49:47:f1" }]'
+ VirtletRootVolumeSize: 8Gi
+ VirtletVCPUCount: "2"
+ spec:
+ affinity:
+ nodeAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ nodeSelectorTerms:
+ - matchExpressions:
+ - key: extraRuntime
+ operator: In
+ values:
+ - virtlet
+ containers:
+ - name: virtlet-deployment
+ # This specifies the image to use.
+ # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+ # of the image name is prepended with https:// and used to download the image
+ image: virtlet.cloud/ubuntu/18.04
+ imagePullPolicy: IfNotPresent
+ # tty and stdin required for "kubectl attach -t" to work
+ tty: true
+ stdin: true
+ resources:
+ requests:
+ cpu: 2
+ memory: 8Gi
+ limits:
+ # This memory limit is applied to the libvirt domain definition
+ cpu: 2
+ memory: 8Gi
+EOF
+
+# Create provisioning CR file for BPA testing
+cat <<EOF > $BPA_DIR/e2e_bpa_test.yaml
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+ name: vmcluster110
+ labels:
+ cluster: vmcluster110
+ cluster-type: virtlet-vm
+ owner: c1
+spec:
+ masters:
+ - master-1:
+ mac-address: c2:b4:57:49:47:f1
+ PodSubnet: 172.21.64.0/18
+EOF
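+# the mac-address in the CR must match the MAC in the VM's Multus annotation so the
+# operator can resolve the VM's IP when building the ansible inventory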
+
+pushd $BPA_DIR
# create flannel-vm net-attach-def
-kubectl apply -f ../deploy/netattachdef-flannel-vm.yaml -n kube-system
+kubectl apply -f netattachdef-flannel-vm.yaml -n kube-system
# generate user ssh key
if [ ! -f "/root/.ssh/id_rsa.pub" ]; then
ssh-keygen -f /root/.ssh/id_rsa -P ""
fi
+# create ssh key secret
+kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub
+
# create virtlet vm
key=$(cat /root/.ssh/id_rsa.pub)
-cp ../deploy/virtlet-deployment-sample.yaml virtlet_test_vm.yaml
sed -i "s|\$ssh_key|${key}|" virtlet_test_vm.yaml
kubectl create -f virtlet_test_vm.yaml
printf "\n$podjson\n\n"
# create provisioning cr
-kubectl apply -f e2e_virtletvm_test_provisioning_cr.yaml
+kubectl apply -f e2e_bpa_test.yaml
+popd
sleep 2m
printf "\n\nBeginning E2E VM Test Teardown\n\n"
-kubectl delete -f e2e_virtletvm_test_provisioning_cr.yaml
-kubectl delete job kud-cluster-vm
-kubectl delete configmap cluster-vm-configmap
+kubectl delete -f e2e_bpa_test.yaml
+kubectl delete job kud-vmcluster110
+kubectl delete configmap vmcluster110-configmap
kubectl delete -f virtlet_test_vm.yaml
+rm -rf /opt/kud/multi-cluster/vmcluster110
+rm -rf $BPA_DIR
Masters []map[string]Master `json:"masters,omitempty"`
Workers []map[string]Worker `json:"workers,omitempty"`
KUDPlugins []string `json:"KUDPlugins,omitempty"`
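+ // PodSubnet is the pod network CIDR for the cluster, passed to the KUD installer as --network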
+ PodSubnet string `json:"PodSubnet,omitempty"`
}
// ProvisioningStatus defines the observed state of Provisioning
mastersList := provisioningInstance.Spec.Masters
workersList := provisioningInstance.Spec.Workers
kudPlugins := provisioningInstance.Spec.KUDPlugins
-
+ podSubnet := provisioningInstance.Spec.PodSubnet
bareMetalHostList, _ := listBareMetalHosts(r.bmhClient)
virtletVMList, _ := listVirtletVMs(r.clientset)
err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC)
return reconcile.Result{}, err
}
+ allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
}
- allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
if clusterType == "virtlet-vm" {
- allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
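+ // append with += so multi-master virtlet-vm clusters keep every host entry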
+ allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
}
masterString += masterLabel + "\n"
clusterData[masterTag + masterLabel] = hostIPaddress
fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC)
return reconcile.Result{}, err
}
+ allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
}
fmt.Printf("%s : %s \n", hostIPaddress, workerMAC)
- allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n"
if clusterType == "virtlet-vm" {
- allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
+ allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n"
}
workerString += workerLabel + "\n"
clusterData[workerTag + workerLabel] = hostIPaddress
return reconcile.Result{}, err
}
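+ // skip the ovn-central inventory section for virtlet-vm clusters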
+ if clusterType != "virtlet-vm" {
_, err = hostFile.NewRawSection("ovn-central", masterString)
if err != nil {
fmt.Printf("Error occured while creating section \n %v", err)
fmt.Printf("Error occured while creating section \n %v", err)
return reconcile.Result{}, err
}
-
+ }
_, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master")
if err != nil {
fmt.Printf("Error occured while creating section \n %v", err)
hostFile.SaveTo(iniHostFilePath)
//Install KUD
- err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset)
+ err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset)
if err != nil {
fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err)
return reconcile.Result{}, err
}
//Function to create job for KUD installation
-func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{
+func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error{
var backOffLimit int32 = 0
var privi bool = true
 installerString := " ./installer --cluster " + clusterName
+ // PodSubnet is optional (omitempty); only pass --network when the CR sets it
+ if podSubnet != "" {
+ installerString += " --network " + podSubnet
+ }
// Check if any plugin was specified
if len(kudPlugins) > 0 {