Add some files for automatic deployment from jumper host.
Add files for deploying k8s worker node.
Add some calico configuration files.
Rename the calico.sh file to setup-cni.sh.
Change-Id: I24abd8f0f8507869064d6b9ceb0aa1f89a44f29c
Signed-off-by: Jingzhao <Jingzhao.Ni@arm.com>
+++ /dev/null
-#!/bin/bash -ex
-
-CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
-CALICO_URI_ROOT=https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation
-
-# Install the Etcd Database
-if [ "$(uname -m)" == 'aarch64' ]; then
- ETCD_YAML=https://raw.githubusercontent.com/Jingzhao123/arm64TemporaryCalico/temporay_arm64/v3.3/getting-started/kubernetes/installation/hosted/etcd-arm64.yaml
-else
- ETCD_YAML=${CALICO_URI_ROOT}/hosted/etcd.yaml
-fi
-wget -O etcd.yaml "${ETCD_YAML}"
-sed -i "s/10.96.232.136/${CLUSTER_IP}/" etcd.yaml
-kubectl apply -f etcd.yaml
-
-# Install the RBAC Roles required for Calico
-kubectl apply -f "${CALICO_URI_ROOT}/rbac.yaml"
-
-# Install Calico to system
-wget -O calico.yaml "${CALICO_URI_ROOT}/hosted/calico.yaml"
-sed -i "s/10.96.232.136/${CLUSTER_IP}/" calico.yaml
-if [ "$(uname -m)" == 'aarch64' ]; then
- sed -i "s/quay.io\/calico/calico/" calico.yaml
-fi
-# FIXME: IP_AUTODETECTION_METHOD?
-kubectl apply -f calico.yaml
-
-# Remove the taints on master node
-kubectl taint nodes --all node-role.kubernetes.io/master- || true
--- /dev/null
+# Calico Version v3.3.2
+# https://docs.projectcalico.org/v3.3/releases#v3.3.2
+# This manifest includes the following component versions:
+# calico/node:v3.3.2
+# calico/cni:v3.3.2
+# calico/kube-controllers:v3.3.2
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Configure this with the location of your etcd cluster.
+ etcd_endpoints: "http://10.96.232.136:6666"
+
+ # If you're using TLS enabled etcd uncomment the following.
+ # You must also populate the Secret below with these files.
+ etcd_ca: "" # "/calico-secrets/etcd-ca"
+ etcd_cert: "" # "/calico-secrets/etcd-cert"
+ etcd_key: "" # "/calico-secrets/etcd-key"
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "etcd_key_file": "__ETCD_KEY_FILE__",
+ "etcd_cert_file": "__ETCD_CERT_FILE__",
+ "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+
+
+# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
+# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: calico-etcd-secrets
+ namespace: kube-system
+data:
+ # Populate the following files with etcd TLS configuration if desired, but leave blank if
+ # not using TLS for etcd.
+ # This self-hosted install expects three files with the following names. The values
+ # should be base64 encoded strings of the entire contents of each file.
+ # etcd-key: null
+ # etcd-cert: null
+ # etcd-ca: null
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.3.2
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Location of the CA certificate for etcd.
+ - name: ETCD_CA_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_ca
+ # Location of the client key for etcd.
+ - name: ETCD_KEY_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_key
+ # Location of the client certificate for etcd.
+ - name: ETCD_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_cert
+ # Set noderef for node controller.
+ - name: CALICO_K8S_NODE_REF
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ - name: IP_AUTODETECTION_METHOD
+ value: "can-reach=www.google.com"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.3.2
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the etcd TLS secrets with mode 400.
+ # See https://kubernetes.io/docs/concepts/configuration/secret/
+ - name: etcd-certs
+ secret:
+ secretName: calico-etcd-secrets
+ defaultMode: 0400
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ # The controllers must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.3.2
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Location of the CA certificate for etcd.
+ - name: ETCD_CA_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_ca
+ # Location of the client key for etcd.
+ - name: ETCD_KEY_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_key
+ # Location of the client certificate for etcd.
+ - name: ETCD_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_cert
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: policy,namespace,serviceaccount,workloadendpoint,node
+ volumeMounts:
+ # Mount in the etcd TLS secrets.
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+ volumes:
+ # Mount in the etcd TLS secrets with mode 400.
+ # See https://kubernetes.io/docs/concepts/configuration/secret/
+ - name: etcd-certs
+ secret:
+ secretName: calico-etcd-secrets
+ defaultMode: 0400
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+
--- /dev/null
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # This taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the Calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Allow this pod to run on the master.
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.3.9
+ env:
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command:
+ - /usr/local/bin/etcd
+ args:
+ - --name=calico
+ - --data-dir=/var/etcd/calico-data
+ - --advertise-client-urls=http://$(CALICO_ETCD_IP):6666
+ - --listen-client-urls=http://0.0.0.0:6666
+ - --listen-peer-urls=http://0.0.0.0:6667
+ - --auto-compaction-retention=1
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
--- /dev/null
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # This taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the Calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Allow this pod to run on the master.
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.3.9-arm64
+ env:
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: ETCD_UNSUPPORTED_ARCH
+ value: "arm64"
+ command:
+ - /usr/local/bin/etcd
+ args:
+ - --name=calico
+ - --data-dir=/var/etcd/calico-data
+ - --advertise-client-urls=http://$(CALICO_ETCD_IP):6666
+ - --listen-client-urls=http://0.0.0.0:6666
+ - --listen-peer-urls=http://0.0.0.0:6667
+ - --auto-compaction-retention=1
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
--- /dev/null
+# Calico Version v3.3.2
+# https://docs.projectcalico.org/v3.3/releases#v3.3.2
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - nodes
+ - serviceaccounts
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+
+---
+
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
sudo apt update
sudo apt install -y \
kubelet=${KUBE_VERSION} kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
-apt-mark hold kubelet kubeadm kubectl
+sudo apt-mark hold kubelet kubeadm kubectl
_conf='/etc/sysctl.d/99-akraino-iec.conf'
echo 'net.bridge.bridge-nf-call-iptables = 1' |& sudo tee "${_conf}"
--- /dev/null
+#!/bin/bash
+set -o xtrace
+set -e
+
+#install essential software
+source k8s_common.sh
+
+# install kubernetes
--- /dev/null
+#!/bin/bash
+set -o xtrace
+set -e
+
+CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
+
+# Install the Etcd Database
+if [ "$(uname -m)" == 'aarch64' ]; then
+ ETCD_YAML=etcd-arm64.yaml
+else
+ ETCD_YAML=etcd-amd64.yaml
+fi
+
+sed -i "s/10.96.232.136/${CLUSTER_IP}/" "cni/calico/${ETCD_YAML}"
+kubectl apply -f "cni/calico/${ETCD_YAML}"
+
+# Install the RBAC Roles required for Calico
+kubectl apply -f "cni/calico/rbac.yaml"
+
+# Install Calico to system
+sed -i "s/10.96.232.136/${CLUSTER_IP}/" cni/calico/calico.yaml
+kubectl apply -f cni/calico/calico.yaml
+
+# Remove the taints on master node
+kubectl taint nodes --all node-role.kubernetes.io/master- || true
--- /dev/null
+#!/bin/bash
+#Install the k8s-master & k8s-worker nodes from the management (jumper) node
+#
+set -e
+
+#
+# Displays the help menu.
+#
+display_help () {
+ echo "Usage: $0 [master ip] [worker ip] [user] [password] "
+ echo " "
+ echo "There should be an user which will be used to install the "
+ echo "corresponding software on master & worker node. This user can "
+ echo "run the sudo command without input password on the hosts."
+ echo " "
+ echo "Example usages:"
+ echo " ./startup.sh 10.169.40.171 10.169.41.172 iec 123456"
+}
+
+
+
+#
+# Deploy k8s with calico.
+#
+deploy_k8s () {
+ set -o xtrace
+
+ INSTALL_SOFTWARE="sudo apt-get update && sudo apt-get install -y git &&\
+ sudo rm -rf ~/.kube ~/iec &&\
+ git clone ${REPO_URL} &&\
+ cd iec/scripts/ && source k8s_common.sh"
+
+    #Automatically deploy the K8s environment on the master node
+ SETUP_MASTER="cd iec/scripts/ && source k8s_master.sh ${K8S_MASTER_IP}"
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${INSTALL_SOFTWARE}
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_MASTER} | tee kubeadm.log
+
+ KUBEADM_JOIN_CMD=$(grep "kubeadm join " ./kubeadm.log)
+
+    #Automatically deploy the K8s environment on the worker node
+ SETUP_WORKER="cd iec/scripts/ && source k8s_worker.sh"
+ sshpass -p ${K8S_WORKERPW} ssh ${HOST_USER}@${K8S_WORKER01_IP} ${INSTALL_SOFTWARE}
+ sshpass -p ${K8S_WORKERPW} ssh ${HOST_USER}@${K8S_WORKER01_IP} "echo \"sudo ${KUBEADM_JOIN_CMD}\" >> ./iec/scripts/k8s_worker.sh"
+ sshpass -p ${K8S_WORKERPW} ssh ${HOST_USER}@${K8S_WORKER01_IP} ${SETUP_WORKER}
+
+ #Deploy etcd & CNI from master node
+ #There may be more options in future. e.g: Calico, Contiv-vpp, Ovn-k8s ...
+ SETUP_CNI="cd iec/scripts && source setup-cni.sh"
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_CNI}
+}
+
+
+PASSWD=${4:-"123456"}
+HOST_USER=${3:-"iec"}
+
+K8S_MASTER_IP=${1:-"10.169.40.171"}
+K8S_MASTERPW=${PASSWD}
+
+K8S_WORKER01_IP=${2:-"10.169.41.172"}
+K8S_WORKERPW=${PASSWD}
+
+REPO_URL="https://gerrit.akraino.org/r/iec"
+LOG_FILE="kubeadm.log"
+
+if [ -f "./${LOG_FILE}" ]; then
+ rm "${LOG_FILE}"
+fi
+
+#
+# Init
+#
+if [ $# -lt 4 ]
+then
+ display_help
+ exit 0
+fi
+
+
+deploy_k8s