Setup scripts for ELIOT on Ubuntu and x86
author    Abhijit Dasgupta <abhijit.das.gupta@huawei.com>
Tue, 5 Mar 2019 14:37:42 +0000 (14:37 +0000)
committer abhijit_onap <abhijit.das.gupta@huawei.com>
Fri, 24 May 2019 08:52:52 +0000 (08:52 +0000)
1) setup.sh     - main setup script
2) common.sh    - installs Docker on the Eliot Manager and the Edge Nodes
3) k8smaster.sh - installs K8s and initialises the K8s master
4) k8sworker.sh - installs kubelet, kubernetes-cni and kubeadm
5) nodelist     - details of the worker nodes to be added

The nodelist file must contain one line per worker node, in the format:
<eliotedgenodeusername>|<eliotedgenodeip>|<eliotedgenodepassword>
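
For example, with hypothetical values:
eliot|10.0.0.2|eliotpass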

Signed-off-by: Abhijit Dasgupta <abhijit.das.gupta@huawei.com>
Change-Id: I285b07350fb1338da5e06f9816abee6a604faee3
Signed-off-by: abhijit_onap <abhijit.das.gupta@huawei.com>
scripts/cni/calico/calico.yaml [new file with mode: 0644]
scripts/cni/calico/rbac.yaml [new file with mode: 0644]
scripts/common.sh [new file with mode: 0644]
scripts/k8smaster.sh [new file with mode: 0644]
scripts/k8sworker.sh [new file with mode: 0644]
scripts/nodelist [new file with mode: 0644]
scripts/setup.sh [new file with mode: 0755]

diff --git a/scripts/cni/calico/calico.yaml b/scripts/cni/calico/calico.yaml
new file mode 100644 (file)
index 0000000..a6a2d8d
--- /dev/null
@@ -0,0 +1,539 @@
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+# This manifest includes the following component versions:
+#   calico/node:v3.3.4
+#   calico/cni:v3.3.4
+#
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: calico-config
+  namespace: kube-system
+data:
+  # To enable Typha, set this to "calico-typha" *and*
+  # set a non-zero value for Typha replicas
+  # below.  We recommend using Typha if you have more than 50 nodes.
+  # Above 100 nodes it is essential.
+  typha_service_name: "none"
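+  # For example, on a large cluster you would change this to
+  # typha_service_name: "calico-typha" and set replicas (in the
+  # Deployment below) to a non-zero value such as 3.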
+  # Configure the Calico backend to use.
+  calico_backend: "bird"
+
+  # Configure the MTU to use
+  veth_mtu: "1440"
+
+  # The CNI network configuration to install on each node.  The special
+  # values in this config will be automatically populated.
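+  # (__KUBERNETES_NODE_NAME__, __CNI_MTU__ and __KUBECONFIG_FILEPATH__ are
+  # substituted by the install-cni container defined in the DaemonSet below.)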
+  cni_network_config: |-
+    {
+      "name": "k8s-pod-network",
+      "cniVersion": "0.3.0",
+      "plugins": [
+        {
+          "type": "calico",
+          "log_level": "info",
+          "datastore_type": "kubernetes",
+          "nodename": "__KUBERNETES_NODE_NAME__",
+          "mtu": __CNI_MTU__,
+          "ipam": {
+            "type": "host-local",
+            "subnet": "usePodCidr"
+          },
+          "policy": {
+              "type": "k8s"
+          },
+          "kubernetes": {
+              "kubeconfig": "__KUBECONFIG_FILEPATH__"
+          }
+        },
+        {
+          "type": "portmap",
+          "snat": true,
+          "capabilities": {"portMappings": true}
+        }
+      ]
+    }
+
+---
+
+
+# This manifest creates a Service,
+# which will be backed by Calico's Typha daemon.
+# Typha sits in between Felix and the API server,
+# reducing Calico's load on the API server.
+
+apiVersion: v1
+kind: Service
+metadata:
+  name: calico-typha
+  namespace: kube-system
+  labels:
+    k8s-app: calico-typha
+spec:
+  ports:
+    - port: 5473
+      protocol: TCP
+      targetPort: calico-typha
+      name: calico-typha
+  selector:
+    k8s-app: calico-typha
+
+---
+
+# This manifest creates a Deployment of Typha to back the above service.
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+  name: calico-typha
+  namespace: kube-system
+  labels:
+    k8s-app: calico-typha
+spec:
+  # Number of Typha replicas.
+  # To enable Typha, set this to a non-zero value *and* set the
+  # typha_service_name variable in the calico-config ConfigMap above.
+  #
+  # We recommend using Typha if you have more than 50 nodes.
+  # Above 100 nodes it is essential
+  # (when using the Kubernetes datastore).
+  # Use one replica for every 100-200 nodes.  In production,
+  # we recommend running at least 3 replicas to reduce the
+  # impact of rolling upgrade.
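+  # For example, a production cluster of ~300 nodes might run replicas: 3.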
+  replicas: 0
+  revisionHistoryLimit: 2
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-typha
+      annotations:
+        # This, along with the CriticalAddonsOnly toleration below,
+        # marks the pod as a critical add-on, ensuring it gets
+        # priority scheduling and that its resources are reserved
+        # if it ever gets evicted.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+        cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
+    spec:
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      hostNetwork: true
+      tolerations:
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+      # Since Calico can't network a pod until Typha is up,
+      # we need to run Typha itself as a host-networked pod.
+      serviceAccountName: calico-node
+      containers:
+        - image: calico/typha:v3.3.4
+          name: calico-typha
+          ports:
+            - containerPort: 5473
+              name: calico-typha
+              protocol: TCP
+          env:
+            # Enable "info" logging by default.
+            # Can be set to "debug" to increase verbosity.
+            - name: TYPHA_LOGSEVERITYSCREEN
+              value: "info"
+            # Disable logging to file and syslog
+            # since those don't make sense in K8s.
+            - name: TYPHA_LOGFILEPATH
+              value: "none"
+            - name: TYPHA_LOGSEVERITYSYS
+              value: "none"
+            # Monitor the Kubernetes API to find the number of running instances
+            # and rebalance connections.
+            - name: TYPHA_CONNECTIONREBALANCINGMODE
+              value: "kubernetes"
+            - name: TYPHA_DATASTORETYPE
+              value: "kubernetes"
+            - name: TYPHA_HEALTHENABLED
+              value: "true"
+              # Uncomment these lines to enable prometheus metrics.
+              # Since Typha is host-networked,
+              # this opens a port on the host, which may need to be secured.
+              # - name: TYPHA_PROMETHEUSMETRICSENABLED
+              #  value: "true"
+              # - name: TYPHA_PROMETHEUSMETRICSPORT
+              #  value: "9093"
+          livenessProbe:
+            exec:
+              command:
+                - calico-typha
+                - check
+                - liveness
+            periodSeconds: 30
+            initialDelaySeconds: 30
+          readinessProbe:
+            exec:
+              command:
+                - calico-typha
+                - check
+                - readiness
+            periodSeconds: 10
+---
+
+# This manifest creates a Pod Disruption Budget for Typha so that the
+# K8s Cluster Autoscaler can evict Typha pods safely.
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+  name: calico-typha
+  namespace: kube-system
+  labels:
+    k8s-app: calico-typha
+spec:
+  maxUnavailable: 1
+  selector:
+    matchLabels:
+      k8s-app: calico-typha
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+  name: calico-node
+  namespace: kube-system
+  labels:
+    k8s-app: calico-node
+spec:
+  selector:
+    matchLabels:
+      k8s-app: calico-node
+  updateStrategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
+  template:
+    metadata:
+      labels:
+        k8s-app: calico-node
+      annotations:
+        # This, along with the CriticalAddonsOnly toleration below,
+        # marks the pod as a critical add-on, ensuring it gets
+        # priority scheduling and that its resources are reserved
+        # if it ever gets evicted.
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      hostNetwork: true
+      tolerations:
+        # Make sure calico-node gets scheduled on all nodes.
+        - effect: NoSchedule
+          operator: Exists
+        # Mark the pod as a critical add-on for rescheduling.
+        - key: CriticalAddonsOnly
+          operator: Exists
+        - effect: NoExecute
+          operator: Exists
+      serviceAccountName: calico-node
+      # Minimize downtime during a rolling upgrade or deletion; tell
+      # Kubernetes to do a "force deletion":
+      # https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+      terminationGracePeriodSeconds: 0
+      containers:
+        # Runs calico/node container on each Kubernetes node.  This
+        # container programs network policy and routes on each
+        # host.
+        - name: calico-node
+          image: calico/node:v3.3.4
+          env:
+            # Use Kubernetes API as the backing datastore.
+            - name: DATASTORE_TYPE
+              value: "kubernetes"
+            # Typha support: controlled by the ConfigMap.
+            - name: FELIX_TYPHAK8SSERVICENAME
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: typha_service_name
+            # Wait for the datastore.
+            - name: WAIT_FOR_DATASTORE
+              value: "true"
+            # Set based on the k8s node name.
+            - name: NODENAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            # Choose the backend to use.
+            - name: CALICO_NETWORKING_BACKEND
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: calico_backend
+            # Cluster type to identify the deployment type
+            - name: CLUSTER_TYPE
+              value: "k8s,bgp"
+            # Auto-detect the BGP IP address.
+            - name: IP
+              value: "autodetect"
+            # Enable IPIP
+            - name: CALICO_IPV4POOL_IPIP
+              value: "Always"
+            # Set MTU for tunnel device used if ipip is enabled
+            - name: FELIX_IPINIPMTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+            # The default IPv4 pool to create on startup if none exists.
+            # Pod IPs will be chosen from this range.
+            # Changing this value after installation will have
+            # no effect. This should fall within `--cluster-cidr`.
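+            # It must also match the --pod-network-cidr given to kubeadm init;
+            # k8smaster.sh passes POD_NETWORK_CIDR=192.168.0.0/16, which matches.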
+            - name: CALICO_IPV4POOL_CIDR
+              value: "192.168.0.0/16"
+            # Disable file logging so `kubectl logs` works.
+            - name: CALICO_DISABLE_FILE_LOGGING
+              value: "true"
+            # Set Felix endpoint to host default action to ACCEPT.
+            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+              value: "ACCEPT"
+            # Disable IPv6 on Kubernetes.
+            - name: FELIX_IPV6SUPPORT
+              value: "false"
+            # Set Felix logging to "info"
+            - name: FELIX_LOGSEVERITYSCREEN
+              value: "info"
+            - name: FELIX_HEALTHENABLED
+              value: "true"
+          securityContext:
+            privileged: true
+          resources:
+            requests:
+              cpu: 250m
+          livenessProbe:
+            httpGet:
+              path: /liveness
+              port: 9099
+              host: localhost
+            periodSeconds: 10
+            initialDelaySeconds: 10
+            failureThreshold: 6
+          readinessProbe:
+            exec:
+              command:
+                - /bin/calico-node
+                - -bird-ready
+                - -felix-ready
+            periodSeconds: 10
+          volumeMounts:
+            - mountPath: /lib/modules
+              name: lib-modules
+              readOnly: true
+            - mountPath: /run/xtables.lock
+              name: xtables-lock
+              readOnly: false
+            - mountPath: /var/run/calico
+              name: var-run-calico
+              readOnly: false
+            - mountPath: /var/lib/calico
+              name: var-lib-calico
+              readOnly: false
+        # This container installs the Calico CNI binaries
+        # and CNI network config file on each node.
+        - name: install-cni
+          image: calico/cni:v3.3.4
+          command: ["/install-cni.sh"]
+          env:
+            # Name of the CNI config file to create.
+            - name: CNI_CONF_NAME
+              value: "10-calico.conflist"
+            # Set the hostname based on the k8s node name.
+            - name: KUBERNETES_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            # The CNI network config to install on each node.
+            - name: CNI_NETWORK_CONFIG
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: cni_network_config
+            # CNI MTU Config variable
+            - name: CNI_MTU
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: veth_mtu
+          volumeMounts:
+            - mountPath: /host/opt/cni/bin
+              name: cni-bin-dir
+            - mountPath: /host/etc/cni/net.d
+              name: cni-net-dir
+      volumes:
+        # Used by calico/node.
+        - name: lib-modules
+          hostPath:
+            path: /lib/modules
+        - name: var-run-calico
+          hostPath:
+            path: /var/run/calico
+        - name: var-lib-calico
+          hostPath:
+            path: /var/lib/calico
+        - name: xtables-lock
+          hostPath:
+            path: /run/xtables.lock
+            type: FileOrCreate
+        # Used to install CNI.
+        - name: cni-bin-dir
+          hostPath:
+            path: /opt/cni/bin
+        - name: cni-net-dir
+          hostPath:
+            path: /etc/cni/net.d
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: calico-node
+  namespace: kube-system
+
+---
+
+# Create all the CustomResourceDefinitions needed for
+# Calico policy and networking mode.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: felixconfigurations.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: FelixConfiguration
+    plural: felixconfigurations
+    singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: bgppeers.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: BGPPeer
+    plural: bgppeers
+    singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: bgpconfigurations.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: BGPConfiguration
+    plural: bgpconfigurations
+    singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: ippools.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: IPPool
+    plural: ippools
+    singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: hostendpoints.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: HostEndpoint
+    plural: hostendpoints
+    singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: clusterinformations.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: ClusterInformation
+    plural: clusterinformations
+    singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: GlobalNetworkPolicy
+    plural: globalnetworkpolicies
+    singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: globalnetworksets.crd.projectcalico.org
+spec:
+  scope: Cluster
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: GlobalNetworkSet
+    plural: globalnetworksets
+    singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: networkpolicies.crd.projectcalico.org
+spec:
+  scope: Namespaced
+  group: crd.projectcalico.org
+  version: v1
+  names:
+    kind: NetworkPolicy
+    plural: networkpolicies
+    singular: networkpolicy
diff --git a/scripts/cni/calico/rbac.yaml b/scripts/cni/calico/rbac.yaml
new file mode 100644 (file)
index 0000000..e4632af
--- /dev/null
@@ -0,0 +1,92 @@
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: calico-node
+rules:
+  - apiGroups: [""]
+    resources:
+      - namespaces
+      - serviceaccounts
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - pods/status
+    verbs:
+      - patch
+  - apiGroups: [""]
+    resources:
+      - pods
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: [""]
+    resources:
+      - services
+    verbs:
+      - get
+  - apiGroups: [""]
+    resources:
+      - endpoints
+    verbs:
+      - get
+  - apiGroups: [""]
+    resources:
+      - nodes
+    verbs:
+      - get
+      - list
+      - update
+      - watch
+  - apiGroups: ["extensions"]
+    resources:
+      - networkpolicies
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups: ["networking.k8s.io"]
+    resources:
+      - networkpolicies
+    verbs:
+      - watch
+      - list
+  - apiGroups: ["crd.projectcalico.org"]
+    resources:
+      - globalfelixconfigs
+      - felixconfigurations
+      - bgppeers
+      - globalbgpconfigs
+      - bgpconfigurations
+      - ippools
+      - globalnetworkpolicies
+      - globalnetworksets
+      - networkpolicies
+      - clusterinformations
+      - hostendpoints
+    verbs:
+      - create
+      - get
+      - list
+      - update
+      - watch
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: calico-node
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: calico-node
+subjects:
+  - kind: ServiceAccount
+    name: calico-node
+    namespace: kube-system
diff --git a/scripts/common.sh b/scripts/common.sh
new file mode 100644 (file)
index 0000000..e2ef825
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This script installs common software for ELIOT.
+# It is executed on both the ELIOT Manager and the ELIOT nodes,
+# and installs the Docker packages.
+# The script must be run on Ubuntu 16.04.
+
+# Set Docker version
+DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
+
+sudo apt-get update && sudo apt-get install -y git curl apt-transport-https software-properties-common
+
+# Install Docker as Prerequisite
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo apt-key fingerprint 0EBFCD88
+sudo add-apt-repository \
+  "deb https://download.docker.com/linux/ubuntu \
+  $(lsb_release -cs) \
+  stable"
+
+sudo apt update
+sudo apt install -y docker-ce=${DOCKER_VERSION}
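+
+# Optional sanity check: confirm the pinned Docker version was installed:
+# sudo docker --version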
+
diff --git a/scripts/k8smaster.sh b/scripts/k8smaster.sh
new file mode 100644 (file)
index 0000000..e13e662
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.13.0-00
+POD_NETWORK_CIDR=192.168.0.0/16
+K8S_CNI_VERSION=0.6.0-00
+
+# Install Kubernetes with Kubeadm
+
+# Disable swap
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+sudo apt install -y \
+  kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} \
+  kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+
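+# Pin the Kubernetes packages so routine apt upgrades cannot change the cluster version.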
+sudo apt-mark hold kubelet kubeadm kubectl
+
+if ! kubectl get nodes; then
+  MASTER_IP="$(hostname -I | cut -d ' ' -f 1)"
+  sudo kubeadm config images pull
+  sudo kubeadm init \
+        --apiserver-advertise-address="${MASTER_IP}" \
+        --pod-network-cidr="${POD_NETWORK_CIDR}"
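+  # kubeadm init prints a "kubeadm join ..." command on success; setup.sh
+  # captures that line from kubeadm.log and replays it on each worker node.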
+
+  if [ "$(id -u)" = 0 ]; then
+    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+      tee -a "${HOME}/.profile"
+    source "${HOME}/.profile"
+  else
+    mkdir -p "${HOME}/.kube"
+    sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+    sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+  fi
+  kubectl apply -f "cni/calico/rbac.yaml"
+  kubectl apply -f "cni/calico/calico.yaml"
+
+fi
diff --git a/scripts/k8sworker.sh b/scripts/k8sworker.sh
new file mode 100644 (file)
index 0000000..b43d4a2
--- /dev/null
@@ -0,0 +1,30 @@
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.13.0-00
+K8S_CNI_VERSION=0.6.0-00
+
+# Install Kubernetes with Kubeadm
+# The script will be executed in Eliot Edge Node
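+# kubectl is not installed on the workers; the cluster is administered
+# from the ELIOT Manager.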
+
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+sudo apt install -y \
+  kubeadm=${KUBE_VERSION} kubelet=${KUBE_VERSION} kubernetes-cni=${K8S_CNI_VERSION}
+
+sudo apt-mark hold kubelet kubeadm
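+
+# The node joins the cluster later via the "kubeadm join" command that
+# setup.sh extracts from the master's kubeadm.log and runs here over SSH.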
diff --git a/scripts/nodelist b/scripts/nodelist
new file mode 100644 (file)
index 0000000..bf7f7c8
--- /dev/null
@@ -0,0 +1 @@
+<eliotedgenodeusername>|<eliotedgenodeip>|<eliotedgenodepassword>
diff --git a/scripts/setup.sh b/scripts/setup.sh
new file mode 100755 (executable)
index 0000000..e5da102
--- /dev/null
@@ -0,0 +1,87 @@
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+########################################################################################
+#                                                                                      #
+# This script sets up the ELIOT Manager and the ELIOT nodes.                           #
+# It installs Docker on both the ELIOT Manager and the ELIOT nodes.                    #
+# It installs Kubernetes: kubeadm, kubelet and kubectl on the ELIOT Manager;           #
+# On each ELIOT Edge Node it installs kubeadm and kubelet.                             #
+# The script has been tested on Ubuntu 16.04.                                          #
+# sshpass must be installed before executing this script.                              #
+########################################################################################
+
+show_help()
+{
+  echo "The script helps in setting up the ELIOT Toplogy Infrastrucutre"
+  echo "The setup installs Docker, K8S Master and K8S worker nodes in  "
+  echo "ELIOT Manager and ELIOT Workder Nodes respectively "
+  echo "After completion of script execution execute command: "
+  echo "kubectl get nodes to check whether the connection between "
+  echo "ELIOT Manager and ELIOT Nodes are established"
+  echo ""
+  echo "Nodelist file should have the details of Worker Nodes in the format of:"
+  echo "EliotNodeUserName|EliotNodeIP|EliotNodePasswor"
+  echo "Each line should have detail of one ELIOT Node only"
+}
+
+# Setting up ELIOT Manager Node.
+# Installing Docker, K8S and Initializing K8S Master
+setup_k8smaster()
+{
+  set -o xtrace
+  sudo rm -rf ~/.kube
+  source common.sh | tee eliotcommon.log
+  source k8smaster.sh | tee kubeadm.log
+  # Setup ELIOT Node
+  setup_k8sworkers
+}
+
+setup_k8sworkers()
+{
+  set -o xtrace
+
+  # Install Docker on ELIOT Node
+  ELIOT_REPO="https://gerrit.akraino.org/r/eliot"
+  SETUP_WORKER_COMMON="sudo rm -rf ~/eliot &&\
+                       git clone ${ELIOT_REPO} &&\
+                       cd eliot/scripts && source common.sh"
+  SETUP_WORKER="cd eliot/scripts/ && source k8sworker.sh"
+
+  KUBEADM_JOIN=$(grep "kubeadm join " ./kubeadm.log)
+  KUBEADM_JOIN="sudo ${KUBEADM_JOIN}"
+
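+  # sshpass supplies the SSH password non-interactively. This assumes each
+  # node's host key is already in known_hosts; if it is not, the first
+  # connection will stop at the host-key prompt (passing
+  # -o StrictHostKeyChecking=no to ssh is one workaround).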
+  # Read the details of every worker node from the nodelist file.
+  while read -r line
+  do
+    nodeinfo="${line}"
+    nodeusr=$(echo "${nodeinfo}" | cut -d"|" -f1)
+    nodeip=$(echo "${nodeinfo}" | cut -d"|" -f2)
+    nodepaswd=$(echo "${nodeinfo}" | cut -d"|" -f3)
+    sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${SETUP_WORKER_COMMON}" < /dev/null
+    sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${SETUP_WORKER}" < /dev/null
+    sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${KUBEADM_JOIN}" < /dev/null
+  done < nodelist
+
+}
+
+
+# Start
+#
+
+if [ "$1" = "--help" ] || [ "$1" = "-h" ]; then
+  show_help
+  exit 0
+fi
+
+
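+# Run the full setup: initialize the ELIOT Manager as the K8s master, then
+# install and join each ELIOT node listed in nodelist.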
+setup_k8smaster