From: ramamani yeleswarapu
Date: Fri, 6 Mar 2020 20:15:09 +0000 (-0800)
Subject: Modify bpa-operator for nestedk8s
X-Git-Tag: v0.4.0~34
X-Git-Url: https://gerrit.akraino.org/r/gitweb?a=commitdiff_plain;h=b6ebd5a2394cd435f07aef0e378179c00e250a48;p=icn.git

Modify bpa-operator for nestedk8s

Signed-off-by: Ramamani Yeleswarapu
Change-Id: I062c57951f22fab0b369d0b1d9a8f2e4e7ce6379
---

diff --git a/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml b/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml
new file mode 100644
index 0000000..a2c77e0
--- /dev/null
+++ b/cmd/bpa-operator/e2etest/bpa_remote_compute_cr_sample.yaml
@@ -0,0 +1,14 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: bpa-remote
+  labels:
+    cluster: bpa-remote
+    owner: c1
+spec:
+  masters:
+    - remote-compute:
+        mac-address: 08:00:27:ef:ab:60
+  KUDPlugins:
+    - bpa
+  PodSubnet: 10.244.64.0/18
diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml
new file mode 100644
index 0000000..0788a06
--- /dev/null
+++ b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr.yaml
@@ -0,0 +1,13 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: remote-vvm110
+  labels:
+    cluster: remote-vvm110
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: c2:b4:57:49:47:f1
+  PodSubnet: 172.21.64.0/18
diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml
new file mode 100644
index 0000000..6523053
--- /dev/null
+++ b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_cr_multi.yaml
@@ -0,0 +1,22 @@
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: remote-vvm115
+  labels:
+    cluster: remote-vvm115
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: c2:b4:57:49:47:f3
+    - master-2:
+        mac-address: c2:b4:57:49:47:f4
+    - master-3:
+        mac-address: c2:b4:57:49:47:f5
+  workers:
+    - worker-1:
+        mac-address: c2:b4:57:49:47:f6
+    - worker-2:
+        mac-address: c2:b4:57:49:47:f7
+  PodSubnet: 172.21.64.0/18
diff --git a/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh
new file mode 100755
index 0000000..8f54d2f
--- /dev/null
+++ b/cmd/bpa-operator/e2etest/bpa_remote_virtletvm_verifier.sh
@@ -0,0 +1,78 @@
+#!/bin/bash
+
+printf "\n\nStart Remote Virtlet VM provisioning E2E test\n\n"
+
+# remote compute provisioned and kube config available
+source ~/ICN/latest/icn/env/lib/common.sh
+CLUSTER_NAME=bpa-remote
+KUBECONFIG=--kubeconfig=/opt/kud/multi-cluster/${CLUSTER_NAME}/artifacts/admin.conf
+APISERVER=$(kubectl ${KUBECONFIG} config view --minify -o jsonpath='{.clusters[0].cluster.server}')
+TOKEN=$(kubectl ${KUBECONFIG} get secret $(kubectl ${KUBECONFIG} get serviceaccount default -o jsonpath='{.secrets[0].name}') -o jsonpath='{.data.token}' | base64 --decode )
+call_api $APISERVER/api --header "Authorization: Bearer $TOKEN" --insecure
+ret=$?
+
+if [[ $ret != 0 ]];
+then
+    printf "\nRemote Kubernetes Cluster Install did not complete successfully\n"
+else
+    printf "\nRemote Kubernetes Cluster Install was successful\n"
+fi
+
+# create virtlet VM in remote compute
+printf "Create remote Virtlet VM ...\n"
+key=$(cat /opt/kud/multi-cluster/.ssh/id_rsa.pub)
+cp ../deploy/virtlet-deployment-sample.yaml bpa_remote_virtletvm.yaml
+sed -i "s|\$ssh_key|${key}|" bpa_remote_virtletvm.yaml
+kubectl ${KUBECONFIG} create -f bpa_remote_virtletvm.yaml
+
+status=""
+while [[ $status != "Running" ]]
+do
+    stats=$(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment)
+    status=$(echo $stats | cut -d " " -f 3)
+    if [[ $status == "Err"* ]]; then
+        echo "Error creating remote Virtlet VM, test incomplete"
+        kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml
+        exit 1
+    fi
+done
+
+echo "Remote Virtlet VM is ready for provisioning"
+
+printf "\nkubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json\n"
+podjson=$(kubectl ${KUBECONFIG} get pods $(kubectl ${KUBECONFIG} get pods |grep -i virtlet-deployment | awk '{print $1}') -o json)
+printf "\n$podjson\n\n"
+
+printf "Provision remote Virtlet VM ...\n"
+kubectl ${KUBECONFIG} apply -f bpa_remote_virtletvm_cr.yaml
+
+#Check Status of remote kud job pod
+status="Running"
+
+while [[ $status == "Running" ]]
+do
+    echo "KUD install job still running"
+    sleep 2m
+    stats=$(kubectl ${KUBECONFIG} get pods |grep -i kud-)
+    status=$(echo $stats | cut -d " " -f 3)
+done
+
+if [[ $status == "Completed" ]];
+then
+    printf "KUD Install Job completed\n"
+    printf "Checking cluster status\n"
+else
+    printf "KUD Install Job failed\n"
+fi
+
+#Print logs of Job Pod
+jobPod=$(kubectl ${KUBECONFIG} get pods|grep kud-)
+podName=$(echo $jobPod | cut -d " " -f 1)
+printf "\nNow Printing Job pod logs\n"
+kubectl ${KUBECONFIG} logs $podName
+
+#printf "\n\nBeginning E2E Remote VM Test Teardown\n\n"
+kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm_cr.yaml
+kubectl ${KUBECONFIG} delete job kud-remotevvm
+kubectl ${KUBECONFIG} delete configmap remotevvm-configmap
+kubectl ${KUBECONFIG} delete -f bpa_remote_virtletvm.yaml
diff --git a/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh b/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh
index 434f49a..5ed41c6 100755
--- a/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh
+++ b/cmd/bpa-operator/e2etest/bpa_virtletvm_verifier.sh
@@ -24,17 +24,140 @@ if [ -f $CNI_PLUGINS ]; then
 fi
 popd
 
+# Create network attachment definition
+BPA_DIR="/tmp/bpa"
+mkdir -p $BPA_DIR
+cat <<EOF > $BPA_DIR/netattachdef-flannel-vm.yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+  name: flannel-vm
+spec:
+  config: '{
+    "cniVersion": "0.3.1",
+    "name" : "cni0",
+    "plugins": [ {
+      "type": "flannel",
+      "cniVersion": "0.3.1",
+      "masterplugin": true,
+      "delegate": {
+        "isDefaultGateway": true
+      }
+    },
+    {
+      "type": "tuning"
+    }]
+  }'
+EOF
+
+cat <<EOF > $BPA_DIR/virtlet_test_vm.yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: virtlet-deployment
+  labels:
+    app: virtlet
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: virtlet
+  template:
+    metadata:
+      labels:
+        app: virtlet
+      annotations:
+        VirtletLibvirtCPUSetting: |
+          mode: host-passthrough
+        # This tells CRI Proxy that this pod belongs to Virtlet runtime
+        kubernetes.io/target-runtime: virtlet.cloud
+        VirtletCloudInitUserData: |
+          ssh_pwauth: True
+          disable_root: false
+          chpasswd: {expire: False}
+          manage_resolv_conf: True
+          resolv_conf:
+            nameservers: ['8.8.8.8', '8.8.4.4']
+          users:
+          - name: root
+            gecos: User
+            primary-group: root
+            groups: users
+            lock_passwd: false
+            shell: /bin/bash
+            sudo: ALL=(ALL) NOPASSWD:ALL
+            ssh_authorized_keys:
+              $ssh_key
+          runcmd:
+            - sed -i -e 's/^#DNS=.*/DNS=8.8.8.8/g' /etc/systemd/resolved.conf
+            - systemctl daemon-reload
+            - systemctl restart systemd-resolved
+        v1.multus-cni.io/default-network: '[
+            { "name": "flannel-vm",
+              "mac": "c2:b4:57:49:47:f1" }]'
+        VirtletRootVolumeSize: 8Gi
+        VirtletVCPUCount: "2"
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: extraRuntime
+                operator: In
+                values:
+                - virtlet
+      containers:
+      - name: virtlet-deployment
+        # This specifies the image to use.
+        # virtlet.cloud/ prefix is used by CRI proxy, the remaining part
+        # of the image name is prepended with https:// and used to download the image
+        image: virtlet.cloud/ubuntu/18.04
+        imagePullPolicy: IfNotPresent
+        # tty and stdin required for "kubectl attach -t" to work
+        tty: true
+        stdin: true
+        resources:
+          requests:
+            cpu: 2
+            memory: 8Gi
+          limits:
+            # This memory limit is applied to the libvirt domain definition
+            cpu: 2
+            memory: 8Gi
+EOF
+
+# Create provisioning CR file for BPA testing
+cat <<EOF > $BPA_DIR/e2e_bpa_test.yaml
+apiVersion: bpa.akraino.org/v1alpha1
+kind: Provisioning
+metadata:
+  name: vmcluster110
+  labels:
+    cluster: vmcluster110
+    cluster-type: virtlet-vm
+    owner: c1
+spec:
+  masters:
+    - master-1:
+        mac-address: c2:b4:57:49:47:f1
+  PodSubnet: 172.21.64.0/18
+EOF
+
+pushd $BPA_DIR
 # create flannel-vm net-attach-def
-kubectl apply -f ../deploy/netattachdef-flannel-vm.yaml -n kube-system
+kubectl apply -f netattachdef-flannel-vm.yaml -n kube-system
 
 # generate user ssh key
 if [ ! -f "/root/.ssh/id_rsa.pub" ]; then
-f "/root/.ssh/id_rsa.pub" ]; then ssh-keygen -f /root/.ssh/id_rsa -P "" fi +# create ssh key secret +kubectl create secret generic ssh-key-secret --from-file=id_rsa=/root/.ssh/id_rsa --from-file=id_rsa.pub=/root/.ssh/id_rsa.pub + # create virtlet vm key=$(cat /root/.ssh/id_rsa.pub) -cp ../deploy/virtlet-deployment-sample.yaml virtlet_test_vm.yaml sed -i "s|\$ssh_key|${key}|" virtlet_test_vm.yaml kubectl create -f virtlet_test_vm.yaml @@ -58,7 +181,8 @@ podjson=$(kubectl get pods $(kubectl get pods |grep -i virtlet-deployment | awk printf "\n$podjson\n\n" # create provisioning cr -kubectl apply -f e2e_virtletvm_test_provisioning_cr.yaml +kubectl apply -f e2e_bpa_test.yaml +popd sleep 2m @@ -84,7 +208,9 @@ printf "$(kubectl logs $kudjob)\n" printf "\n\nBeginning E2E VM Test Teardown\n\n" -kubectl delete -f e2e_virtletvm_test_provisioning_cr.yaml -kubectl delete job kud-cluster-vm -kubectl delete configmap cluster-vm-configmap +kubectl delete -f e2e_bpa_test.yaml +kubectl delete job kud-vmcluster110 +kubectl delete configmap vmcluster110-configmap kubectl delete -f virtlet_test_vm.yaml +rm -rf /opt/kud/multi-cluster/vmcluster110 +rm -rf $BPA_DIR diff --git a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go index d6225d1..d3eb1b4 100644 --- a/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go +++ b/cmd/bpa-operator/pkg/apis/bpa/v1alpha1/provisioning_types.go @@ -16,6 +16,7 @@ type ProvisioningSpec struct { Masters []map[string]Master `json:"masters,omitempty"` Workers []map[string]Worker `json:"workers,omitempty"` KUDPlugins []string `json:"KUDPlugins,omitempty"` + PodSubnet string `json:"PodSubnet,omitempty"` } // ProvisioningStatus defines the observed state of Provisioning diff --git a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go index b4e9577..8d8436b 100644 --- a/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go +++ b/cmd/bpa-operator/pkg/controller/provisioning/provisioning_controller.go @@ -192,7 +192,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. mastersList := provisioningInstance.Spec.Masters workersList := provisioningInstance.Spec.Workers kudPlugins := provisioningInstance.Spec.KUDPlugins - + podSubnet := provisioningInstance.Spec.PodSubnet bareMetalHostList, _ := listBareMetalHosts(r.bmhClient) virtletVMList, _ := listVirtletVMs(r.clientset) @@ -254,11 +254,11 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. err = fmt.Errorf("IP address not found for host with MAC address %s \n", masterMAC) return reconcile.Result{}, err } + allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" } - allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" if clusterType == "virtlet-vm" { - allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" + allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" } masterString += masterLabel + "\n" clusterData[masterTag + masterLabel] = hostIPaddress @@ -337,12 +337,12 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. 
fmt.Errorf("IP address not found for host with MAC address %s \n", workerMAC) return reconcile.Result{}, err } + allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" } fmt.Printf("%s : %s \n", hostIPaddress, workerMAC) - allString += workerLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + "\n" if clusterType == "virtlet-vm" { - allString = masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" + allString += masterLabel + " ansible_ssh_host=" + hostIPaddress + " ansible_ssh_port=22" + " ansible_ssh_user=root" + " ansible_ssh_pass=root" + "\n" } workerString += workerLabel + "\n" clusterData[workerTag + workerLabel] = hostIPaddress @@ -414,6 +414,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. return reconcile.Result{}, err } + if clusterType != "virtlet-vm" { _, err = hostFile.NewRawSection("ovn-central", masterString) if err != nil { fmt.Printf("Error occured while creating section \n %v", err) @@ -431,7 +432,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. fmt.Printf("Error occured while creating section \n %v", err) return reconcile.Result{}, err } - + } _, err = hostFile.NewRawSection("k8s-cluster:children", "kube-node\n" + "kube-master") if err != nil { fmt.Printf("Error occured while creating section \n %v", err) @@ -443,7 +444,7 @@ func (r *ReconcileProvisioning) Reconcile(request reconcile.Request) (reconcile. hostFile.SaveTo(iniHostFilePath) //Install KUD - err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, kudPlugins, r.clientset) + err = createKUDinstallerJob(clusterName, request.Namespace, clusterLabel, podSubnet, kudPlugins, r.clientset) if err != nil { fmt.Printf("Error occured while creating KUD Installer job for cluster %v\n ERROR: %v", clusterName, err) return reconcile.Result{}, err @@ -624,12 +625,12 @@ func getConfigMapData(namespace, clusterName string, clientset kubernetes.Interf } //Function to create job for KUD installation -func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, kudPlugins []string, clientset kubernetes.Interface) error{ +func createKUDinstallerJob(clusterName, namespace string, labels map[string]string, podSubnet string, kudPlugins []string, clientset kubernetes.Interface) error{ var backOffLimit int32 = 0 var privi bool = true - installerString := " ./installer --cluster " + clusterName + installerString := " ./installer --cluster " + clusterName + " --network " + podSubnet // Check if any plugin was specified if len(kudPlugins) > 0 {