--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+HOSTNAME=$(hostname)
+sudo docker run \
+ --volume=/:/rootfs:ro \
+ --volume=/var/run:/var/run:ro \
+ --volume=/sys:/sys:ro \
+ --volume=/var/lib/docker/:/var/lib/docker:ro \
+ --volume=/dev/disk/:/dev/disk:ro \
+ --publish=8081:8080 \
+ --detach=true \
+ --name=cadvisor-${HOSTNAME} \
+ google/cadvisor:latest
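+
+# Optional sanity check (assumes the container came up): cAdvisor serves its
+# UI and metrics on the published port, e.g. curl http://localhost:8081/metrics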
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+TESTYAML="testk8s-nginx.yaml"
+
+# start
+
+source ../src/config_kubeedge > /dev/null 2>&1
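+# config_kubeedge is expected to define the variables used below: EDGENODEUSR,
+# EDGENODEIP, EDGENODEPASSWORD, HOME_EDGENODE and PATH_OF_ELIOTFOLDER.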
+cd
+kubectl delete -f $TESTYAML
+
+exec_edge_master(){
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp ${PATH_OF_ELIOTFOLDER}/scripts/ci_management/cleanup_edge.sh \
+ ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+ sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
+ source cleanup_edge.sh
+
+ cd $PATH_OF_ELIOTFOLDER/scripts/ci_management
+ source cleanup_master.sh
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp ${PATH_OF_ELIOTFOLDER}/scripts/ci_management/cleanup_edge_final.sh \
+ ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+ sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
+ source cleanup_edge_final.sh
+
+}
+
+exec_edge_master > /dev/null 2>&1
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+NGINX=$(sudo docker ps | grep nginx | wc -l)
+KUBEPROXY=$(sudo docker ps | grep k8s.gcr.io | wc -l)
+CONSTZERO="0"
+
+# start
+
+source config_kubeedge > /dev/null 2>&1
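+# config_kubeedge is expected to define MASTERNODEIP, used by keadm below.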
+source ~/.profile
+
+cd
+
+if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
+ cd $GOPATH/src/github.com/kubeedge/kubeedge/keadm
+ ./keadm reset --k8sserverip $MASTERNODEIP:8080
+fi
+
+cd /etc/kubeedge
+
+if [ -f "certs.tgz" ]; then
+ sudo rm -rf certs.tgz
+fi
+
+if [ -d "/etc/kubeedge/ca" ]; then
+ sudo rm -rf /etc/kubeedge/ca
+fi
+
+if [ -d "/etc/kubeedge/certs" ]; then
+ sudo rm -rf /etc/kubeedge/certs
+fi
+
+if [ -d "/root/go/src" ]; then
+ sudo rm -rf /root/go/src
+fi
+
+# stop binaries edge_core
+cd /usr/local/bin
+
+if [ -f "edge_core" ]; then
+ sudo rm edge_core
+fi
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+NGINX=$(sudo docker ps | grep nginx | wc -l)
+KUBEPROXY=$(sudo docker ps | grep k8s.gcr.io | wc -l)
+CONSTZERO="0"
+
+# start
+echo "nginx container stop"
+if [ "$NGINX" != "$CONSTZERO" ]; then
+   sudo docker kill $(sudo docker ps -q --filter ancestor=nginx:1.15.12)
+fi
+
+echo "kubeproxy container stop"
+if [ "$KUBEPROXY" != "$CONSTZERO" ]; then
+   sudo docker kill $(sudo docker ps -q --filter ancestor=k8s.gcr.io/kube-proxy:v1.14.3)
+fi
+echo "Finished"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+TESTYAML="testk8s-nginx.yaml"
+SUPERUSER="root"
+value=$(whoami)
+
+# start
+
+# kubeedge reset internally undo the things done by ./kubeedge init
+
+if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
+ cd $GOPATH/src/github.com/kubeedge/kubeedge/keadm
+ ./keadm reset
+fi
+
+# delete the previously existing certificates
+
+if [ -d "/etc/kubeedge/ca" ]; then
+ sudo rm -rf /etc/kubeedge/ca
+fi
+
+if [ -d "/etc/kubeedge/certs" ]; then
+ cd /etc/kubeedge
+ sudo rm -rf certs
+fi
+
+cd /etc/kubeedge
+if [ -f "certs.tgz" ]; then
+ sudo rm certs.tgz
+fi
+
+# delete the kubeedge code
+
+if [ -d "$GOPATH/src" ]; then
+ cd $GOPATH
+ sudo rm -rf src
+fi
+
+# stop binaries edge_core edgecontroller
+
+cd /usr/local/bin
+
+if [ -f "edge_core" ]; then
+ sudo rm edge_core
+fi
+
+if [ -f "edgecontroller" ]; then
+ sudo rm edgecontroller
+fi
+
+if [ "$value" != "$SUPERUSER" ]; then
+ sudo su
+fi
+
+cd
+
+if [ -f $TESTYAML ]; then
+ sudo rm $TESTYAML
+fi
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# The script is to stop and remove the prometheus and cadvisor containers from
+# ELIOT Manager and ELIOT Edge Node respectively.
+
+# stop prometheus in ELIOT Manager
+source uninstall_prometheus.sh | tee uninstall_prometheus.log
+
+# Command executed on the ELIOT Edge Node to stop cadvisor
+stop_cadvisor_atedge="cd eliot/scripts/ci_management && source uninstall_cadvisor.sh"
+# Read all the Worker Node details from nodelist file.
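+# Each nodelist line has the form <user>|<ip>|<password> (see the nodelist template).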
+while read line
+do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${stop_cadvisor_atedge}
+done < ../nodelist > /dev/null 2>&1
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+# Stop the cadvisor container on the ELIOT Edge Node
+if [ $(sudo docker ps | grep cadvisor | wc -l) -gt 0 ]; then
+ sudo docker stop $(sudo docker ps | grep cadvisor | awk '{ print $1 }')
+fi
+
+if [ $(sudo docker ps -a | grep cadvisor | wc -l) -gt 0 ]; then
+ sudo docker rm $(sudo docker ps -a | grep cadvisor | awk '{ print $1 }')
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+# stop prometheus in ELIOT Manager
+
+if [ $(sudo docker ps | grep prometheus | wc -l) -gt 0 ]; then
+ echo "Stopping prometheus container id :- $(sudo docker ps | grep prometheus | awk '{ print $1 }')"
+ sudo docker stop $(sudo docker ps | grep prometheus | awk '{ print $1 }')
+fi
+if [ $(sudo docker ps -a | grep prometheus | wc -l) -gt 0 ]; then
+ echo "Removing prometheus container id $(sudo docker ps -a | grep prometheus | awk '{ print $1 }')"
+ sudo docker rm $(sudo docker ps -a | grep prometheus | awk '{ print $1 }')
+fi
+
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+# This manifest includes the following component versions:
+# calico/node:v3.3.4
+# calico/cni:v3.3.4
+#
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # To enable Typha, set this to "calico-typha" *and*
+ # set a non-zero value for Typha replicas
+ # below. We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential.
+ typha_service_name: "none"
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "usePodCidr"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+
+
+# This manifest creates a Service,
+# which will be backed by Calico's Typha daemon.
+# Typha sits in between Felix and the API server,
+# reducing Calico's load on the API server.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ ports:
+ - port: 5473
+ protocol: TCP
+ targetPort: calico-typha
+ name: calico-typha
+ selector:
+ k8s-app: calico-typha
+
+---
+
+# This manifest creates a Deployment of Typha to back the above service.
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ # Number of Typha replicas.
+ # To enable Typha, set this to a non-zero value *and* set the
+ # typha_service_name variable in the calico-config ConfigMap above.
+ #
+ # We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential
+ # (when using the Kubernetes datastore).
+ # Use one replica for every 100-200 nodes. In production,
+ # we recommend running at least 3 replicas to reduce the
+ # impact of rolling upgrade.
+ replicas: 0
+ revisionHistoryLimit: 2
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-typha
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical
+ # add-on, ensuring it gets priority scheduling
+ # and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Since Calico can't network a pod until Typha is up,
+ # we need to run Typha itself as a host-networked pod.
+ serviceAccountName: calico-node
+ containers:
+ - image: calico/typha:v3.3.4
+ name: calico-typha
+ ports:
+ - containerPort: 5473
+ name: calico-typha
+ protocol: TCP
+ env:
+ # Enable "info" logging by default.
+ # Can be set to "debug" to increase verbosity.
+ - name: TYPHA_LOGSEVERITYSCREEN
+ value: "info"
+ # Disable logging to file and syslog
+ # since those don't make sense in K8s.
+ - name: TYPHA_LOGFILEPATH
+ value: "none"
+ - name: TYPHA_LOGSEVERITYSYS
+ value: "none"
+ # Monitor the Kubernetes API to find the number of running instances
+ # and rebalance connections.
+ - name: TYPHA_CONNECTIONREBALANCINGMODE
+ value: "kubernetes"
+ - name: TYPHA_DATASTORETYPE
+ value: "kubernetes"
+ - name: TYPHA_HEALTHENABLED
+ value: "true"
+ # Uncomment these lines to enable prometheus metrics.
+ # Since Typha is host-networked,
+ # this opens a port on the host, which may need to be secured.
+ # - name: TYPHA_PROMETHEUSMETRICSENABLED
+ # value: "true"
+ # - name: TYPHA_PROMETHEUSMETRICSPORT
+ # value: "9093"
+ livenessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - liveness
+ periodSeconds: 30
+ initialDelaySeconds: 30
+ readinessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - readiness
+ periodSeconds: 10
+---
+
+# This manifest creates a Pod Disruption Budget
+# for Typha to allow K8s Cluster Autoscaler to evict
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-typha
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion;
+ # tell Kubernetes to do a "force deletion"
+ # https://kubernetes.io/docs/concepts
+ # /workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.3.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Typha support: controlled by the ConfigMap.
+ - name: FELIX_TYPHAK8SSERVICENAME
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: typha_service_name
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists.
+ # Pod IPs will be chosen from this range.
+ # Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.3.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+
+# Create all the CustomResourceDefinitions needed for
+# Calico policy and networking mode.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups: [""]
+ resources:
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - services
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups: ["extensions"]
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - create
+ - get
+ - list
+ - update
+ - watch
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+ - kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
--- /dev/null
+# yamllint disable
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Typha is disabled.
+ typha_service_name: "none"
+ # Configure the backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamblocks.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMBlock
+ plural: ipamblocks
+ singular: ipamblock
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: blockaffinities.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BlockAffinity
+ plural: blockaffinities
+ singular: blockaffinity
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamhandles.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMHandle
+ plural: ipamhandles
+ singular: ipamhandle
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamconfigs.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMConfig
+ plural: ipamconfigs
+ singular: ipamconfig
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networksets.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkSet
+ plural: networksets
+ singular: networkset
+---
+# Source: calico/templates/rbac.yaml
+
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Nodes are watched to monitor for deletions.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - watch
+ - list
+ - get
+ # Pods are queried to check for existence.
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ # IPAM resources are manipulated when nodes are deleted.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # Needs access to update clusterinformations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - clusterinformations
+ verbs:
+ - get
+ - create
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ # The CNI plugin needs to get pods, nodes, and namespaces.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - services
+ verbs:
+ # Used to discover service IPs for advertisement.
+ - watch
+ - list
+ # Used to discover Typhas.
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ # Needed for clearing NodeNetworkUnavailable flag.
+ - patch
+ # Calico stores some configuration information in node annotations.
+ - update
+ # Watch for changes to Kubernetes NetworkPolicies.
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ # Used by Calico for policy information.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - list
+ - watch
+ # The CNI plugin patches pods/status.
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ # Calico monitors various CRDs for config.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - ipamblocks
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - networksets
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - get
+ - list
+ - watch
+ # Calico must create and update some CRDs on startup.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ - felixconfigurations
+ - clusterinformations
+ verbs:
+ - create
+ - update
+ # Calico stores some configuration information on the node.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+  # These permissions are only required for upgrade from v2.6, and can
+ # be removed after upgrade or on fresh installations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - bgpconfigurations
+ - bgppeers
+ verbs:
+ - create
+ - update
+ # These permissions are required for Calico CNI to perform IPAM allocations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipamconfigs
+ verbs:
+ - get
+ # Block affinities must also be watchable by confd for route aggregation.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ verbs:
+ - watch
+ # The Calico IPAM migration needs to get daemonsets. These permissions can be
+ # removed if not upgrading from an installation using host-local IPAM.
+ - apiGroups: ["apps"]
+ resources:
+ - daemonsets
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ priorityClassName: system-node-critical
+ initContainers:
+ # This container performs upgrade from host-local IPAM to calico-ipam.
+ # It can be deleted if this is a fresh installation, or if you have already
+ # upgraded to use calico-ipam.
+ - name: upgrade-ipam
+ image: calico/cni:v3.8.4
+ command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
+ env:
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ volumeMounts:
+ - mountPath: /var/lib/cni/networks
+ name: host-local-net-dir
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ securityContext:
+ privileged: true
+ # This container installs the CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.8.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Prevents the container from sleeping forever.
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ securityContext:
+ privileged: true
+ # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
+ # to communicate with Felix over the Policy Sync API.
+ - name: flexvol-driver
+ image: calico/pod2daemon-flexvol:v3.8.4
+ volumeMounts:
+ - name: flexvol-driver-host
+ mountPath: /host/driver
+ securityContext:
+ privileged: true
+ containers:
+ # Runs calico-node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.8.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - name: policysync
+ mountPath: /var/run/nodeagent
+ volumes:
+ # Used by calico-node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the directory for host-local IPAM allocations. This is
+ # used when upgrading from host-local to calico-ipam, and can be removed
+ # if not using the upgrade-ipam init container.
+ - name: host-local-net-dir
+ hostPath:
+ path: /var/lib/cni/networks
+ # Used to create per-pod Unix Domain Sockets
+ - name: policysync
+ hostPath:
+ type: DirectoryOrCreate
+ path: /var/run/nodeagent
+ # Used to install Flex Volume Driver
+ - name: flexvol-driver-host
+ hostPath:
+ type: DirectoryOrCreate
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.8.4
+ env:
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: node
+ - name: DATASTORE_TYPE
+ value: kubernetes
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
+
+---
+# Source: calico/templates/calico-typha.yaml
+
+---
+# Source: calico/templates/configure-canal.yaml
+
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This script installs common software for ELIOT.
+# It is to be executed on both the ELIOT Manager and the ELIOT Nodes.
+# It installs the Docker software.
+# The script has to be executed on Ubuntu 16.04.
+
+# Set Docker version
+DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
+
+sudo apt-get update && sudo apt-get install -y git
+
+# Install Docker as Prerequisite
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo apt-key fingerprint 0EBFCD88
+sudo add-apt-repository \
+ "deb https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+
+sudo apt update
+sudo apt install -y docker-ce=${DOCKER_VERSION}
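+
+# Optional sanity check (assumes the install succeeded):
+# sudo docker --version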
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# constants
+
+DOCKER_VERSION=18.09.6
+KUBE_VERSION=1.15.0-0
+MACHINE=$(uname -m)
+
+# start
+
+# This script installs docker and kubeadm on both the ELIOT Master and Edge nodes.
+
+sudo sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' \
+/etc/sysconfig/selinux
+
+sudo modprobe br_netfilter
+_conf='/etc/sysctl.d/99-akraino-eliot.conf'
+echo 'net.bridge.bridge-nf-call-iptables = 1' |& sudo tee "${_conf}"
+sudo sysctl -q -p "${_conf}"
+
+#echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
+
+sudo swapoff -a
+
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+
+sudo yum-config-manager \
+--add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+sudo yum install -y docker-ce-${DOCKER_VERSION} docker-ce-cli-${DOCKER_VERSION} \
+containerd.io
+
+# Kubernetes repository set
+
+cat <<-EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-${MACHINE}
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOF
+
+# Set SELinux in permissive mode (effectively disabling it)
+sudo setenforce 0
+sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+
+sudo yum install -y kubelet-${KUBE_VERSION} kubeadm-${KUBE_VERSION} \
+kubectl-${KUBE_VERSION} --disableexcludes=kubernetes
+sudo systemctl enable --now kubelet
+sudo systemctl start docker && sudo systemctl enable docker
+
+sudo systemctl daemon-reload
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+promyml=~/prometheus.yml
+workernodeip=""
+blank=""
+count=1
+firstline=1
+while read line
+do
+ if [ $count -gt $firstline ]; then
+ workernodeip+="','"
+ fi
+ nodeinfo="${line}"
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ echo $nodeip
+ workernodeip+=$nodeip
+ workernodeip+=":8081"
+ echo $workernodeip
+ count=2
+ echo $count
+done < nodelist > /dev/null 2>&1
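+
+# Illustration (hypothetical IPs): for nodelist entries with IPs 10.0.0.11 and
+# 10.0.0.12 the loop yields workernodeip="10.0.0.11:8081','10.0.0.12:8081",
+# which renders below as targets: ['10.0.0.11:8081','10.0.0.12:8081'].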
+
+echo "workernodeip="
+echo $workernodeip
+
+cat <<EOF > "${promyml}"
+---
+global:
+ scrape_interval: 15s
+
+scrape_configs:
+ - job_name: 'prometheus'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: cadvisor
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['$workernodeip']
+EOF
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.16.0-00
+POD_NETWORK_CIDR=192.168.0.0/16
+K8S_CNI_VERSION=0.7.5-00
+
+#K8s service CIDR range
+K8s_SVC_CIDR=10.96.0.0/12
+
+# Install Kubernetes with Kubeadm
+
+# Disable swap
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+sudo apt install -y \
+ kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} \
+ kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+
+sudo apt-mark hold kubelet kubeadm kubectl
+
+if ! kubectl get nodes; then
+ hostname -I > hostname.tmp
+ MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+ rm hostname.tmp
+ sudo kubeadm config images pull
+ sudo kubeadm init \
+ --apiserver-advertise-address="${MASTER_IP}" \
+ --pod-network-cidr="${POD_NETWORK_CIDR}" \
+ --service-cidr="${K8s_SVC_CIDR}"
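+
+  # On success, kubeadm init prints a "kubeadm join ..." command that can be
+  # used to join worker nodes to this cluster.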
+
+ if [ "$(id -u)" = 0 ]; then
+    export KUBECONFIG=/etc/kubernetes/admin.conf
+ echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+ tee -a "${HOME}/.profile"
+ source "${HOME}/.profile"
+ else
+ mkdir -p "${HOME}/.kube"
+ sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+ sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+ fi
+ #kubectl apply -f "cni/calico/rbac.yaml"
+ kubectl apply -f "cni/calico/v38/calico.yaml"
+
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# constants
+
+POD_NETWORK_CIDR=192.168.0.0/16
+KUBE_VERSION=1.15.0-0
+KUBERNETES_CNI=0.7.5-0
+
+# start
+
+hostname -I > hostname.tmp
+MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+rm hostname.tmp
+
+# kubernetes installation
+
+sudo yum install -y kubelet-${KUBE_VERSION} kubectl-${KUBE_VERSION} \
+kubernetes-cni-${KUBERNETES_CNI}
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
+
+# Initialize kubernetes on master
+
+sudo kubeadm init \
+ --apiserver-advertise-address="${MASTER_IP}" \
+ --pod-network-cidr="${POD_NETWORK_CIDR}"
+
+mkdir -p "${HOME}/.kube"
+sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.16.0-00
+K8S_CNI_VERSION=0.7.5-00
+
+# Install Kubernetes with Kubeadm
+# The script will be executed on the ELIOT Edge Node.
+
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+sudo apt install -y \
+ kubeadm=${KUBE_VERSION} kubelet=${KUBE_VERSION} kubernetes-cni=${K8S_CNI_VERSION}
+
+#sudo apt-mark hold kubelet kubeadm
--- /dev/null
+#!/bin/bash
+
+######################################################################
+# #
+# The script is to undo the changes on ELIOT Manager and ELIOT nodes #
+# done by setup.sh file. #
+# It uninstalls docker, kubernetes. #
+# It releases the port used. #
+# It deletes the files created for kubernetes in node machine #
+# Script is tested in Ubuntu 16.04 version. #
+######################################################################
+
+# constants
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
+show_help()
+{
+  echo "This script will remove docker and its related files from the master and node machines"
+  echo "It will remove kubeadm, kubectl, kubelet and the other kubernetes packages from the master and node machines"
+  echo "The changes will be executed first on the manager machine and then on the node machines."
+  echo "It will pick the node machine details from the nodelist file"
+  echo "This script supports Linux - Ubuntu only"
+}
+
+# Rollbacking the changes on ELIOT Manager Node
+rollback_k8smaster()
+{
+if [ "$(id -u)" = 0 ]; then
+ sudo apt-get install iptables
+ sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
+ sudo apt-get install ipvsadm
+ sudo fuser -k -n tcp 10250
+ sudo yes y | apt-get purge -y docker-engine
+ sudo yes y | apt-get purge -y docker
+ sudo yes y | apt-get purge -y docker.io
+ sudo yes y | apt-get purge -y docker-ce
+ sudo yes y | apt-get purge -y docker-ce-cli
+ sudo yes y | groupdel docker
+ sudo yes y | kubeadm reset
+ sudo yes y | apt-get purge kubeadm
+ sudo yes y | apt-get purge kubectl
+ sudo yes y | apt-get purge kubelet
+ sudo yes y | apt-get purge kube*
+ sudo yes y | apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+ sudo yes y | apt-get autoremove
+ sudo yes y | apt-get autoclean
+else
+ sudo fuser -k -n tcp 10250
+ sudo yes y | sudo apt-get purge -y docker-engine
+ sudo yes y | sudo apt-get purge -y docker
+ sudo yes y | sudo apt-get purge -y docker.io
+ sudo yes y | sudo apt-get purge -y docker-ce
+ sudo yes y | sudo apt-get purge -y docker-ce-cli
+ sudo yes y | sudo kubeadm reset
+ sudo yes y | sudo apt-get purge kubeadm
+ sudo yes y | sudo apt-get purge kubectl
+ sudo yes y | sudo apt-get purge kubelet
+ sudo yes y | sudo apt-get purge kube*
+ sudo yes y | sudo apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+fi
+
+rollback_k8sworkers
+
+}
+
+#Rollbacking the changes on ELIOT Worker Node
+rollback_k8sworkers()
+{
+if [ " $(id -u)" = 0]; then
+ INSTALL_IPVSADM="sudo apt-get install ipvsadm"
+ RESET_PORT="fuser -k -n tcp 10250"
+ #REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | groupdel docker"
+ RESET_KUBEADM="sudo yes y | kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | apt-get purge kubernetes-cni"
+ REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+ AUTO_REMOVE="sudo yes y | apt-get autoremove"
+ AUTO_CLEAN="sudo yes y | apt-get autoclean"
+else
+  INSTALL_IPVSADM="sudo apt-get install -y ipvsadm"
+  RESET_PORT="fuser -k -n tcp 10250"
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | sudo apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | sudo apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | sudo apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | sudo apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | sudo apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | sudo groupdel docker"
+ RESET_KUBEADM="sudo yes y | sudo kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | sudo apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | sudo apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | sudo apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | sudo apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | sudo apt-get purge kubernetes-cni"
+  REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+  AUTO_REMOVE="sudo yes y | sudo apt-get autoremove"
+  AUTO_CLEAN="sudo yes y | sudo apt-get autoclean"
+fi
+
+#Read all the Worker Node details from nodelist file.
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER1} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER2} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER3} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER4} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER5} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER6} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES1} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES2} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES3} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES4} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES5} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES6} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_REMOVE} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_CLEAN} < /dev/null
+ done < nodelist > /dev/null 2>&1
+
+}
+
+verify_reset_status()
+{
+echo "Success!!"
+}
+
+if [ "$1" == "--help" ] || [ "$1" == "-h" ];
+then
+ show_help
+ exit 0
+fi
+
+if [[ $OSPLATFORM = *Ubuntu* ]]; then
+ rollback_k8smaster
+ verify_reset_status
+else
+  echo "The script supports only Linux - Ubuntu."
+fi
--- /dev/null
+#!/bin/bash
+
+########################################################################################
+# #
+# The script is to reset the settings on ELIOT Manager and ELIOT nodes #
+# before running the setup.sh file again on the same setup. #
+# It resets the settings of kubeadm and restarts its service #
+# It releases the ports used. #
+# It deletes the files created for kubernetes on node machine #
+# Script is tested in Ubuntu 16.04 version. #
+########################################################################################
+
+# constants
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
+show_help()
+{
+  echo "The script resets the settings on the ELIOT Manager and ELIOT nodes,"
+  echo "which needs to be done before executing the setup.sh file again."
+  echo "The changes will be executed first on the manager machine and then on the node machines."
+  echo "It will pick the node machine details from the nodelist file"
+}
+
+# Resetting ELIOT Manager Node
+reset_k8smaster()
+{
+ yes y | sudo kubeadm reset
+ sudo apt-get install -y iptables
+ sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
+ sudo apt-get install -y ipvsadm
+ sudo systemctl restart kubelet
+ sudo fuser -k -n tcp 10250
+
+ reset_k8sworkers
+}
+
+#Resetting ELIOT Worker Node
+reset_k8sworkers()
+{
+RESET_KUBEADM="sudo yes y | kubeadm reset"
+INSTALL_IPVSADM="sudo apt-get install ipvsadm"
+RESTART_KUBELET="sudo systemctl restart kubelet"
+RESET_PORT="sudo fuser -k -n tcp 10250"
+#REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
+REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+REMOVE_CADVISOR_FILES="docker rm cadvisor-iot-node1"
+
+#Read all the Worker Node details from nodelist file.
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESTART_KUBELET} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_CADVISOR_FILES} < /dev/null
+ done < nodelist > /dev/null 2>&1
+}
+
+verify_reset_status()
+{
+echo "Success!!"
+}
+
+if [ "$1" == "--help" ] || [ "$1" == "-h" ];
+then
+ show_help
+ exit 0
+fi
+
+if [[ $OSPLATFORM = *Ubuntu* ]]; then
+ reset_k8smaster
+ verify_reset_status
+else
+ echo "The script supports only Linux - Ubuntu"
+fi
--- /dev/null
+<eliotedgenodeusername>|<eliotedgenodeip>|<eliotedgenodepassword>
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# start
+PROMETHEUS_HOST_PORT="9090"
+PROMETHEUS_CONTAINER_PORT="9090"
+#cp ci_management/prometheus.yml $HOME
+source generatePromeyml.sh
+if [ ! -d "/etc/prometheus" ]; then
+ sudo mkdir /etc/prometheus
+fi
+
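+# For reference, a minimal prometheus.yml of the kind generatePromeyml.sh is
+# assumed to generate would look roughly like this (illustrative values only):
+#
+#   global:
+#     scrape_interval: 15s
+#   scrape_configs:
+#     - job_name: 'cadvisor'
+#       static_configs:
+#         - targets: ['<nodeip>:<cadvisor-port>']
+#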
+sudo docker run -p ${PROMETHEUS_HOST_PORT}:${PROMETHEUS_CONTAINER_PORT} \
+ -v ~/prometheus.yml:/etc/prometheus/prometheus.yml \
+ -d prom/prometheus \
+ --config.file=/etc/prometheus/prometheus.yml
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+########################################################################################
+# #
+# The script is to setup the ELIOT Manager and ELIOT nodes. #
+# It installs Docker in both ELIOT Manager and ELIOT node. #
+# It installs Kubernetes: on the ELIOT Manager, kubeadm, kubelet and kubectl are #
+# installed; on the ELIOT Edge Node, kubeadm and kubelet are installed. #
+# The script has been tested on Ubuntu 16.04. #
+# sshpass needs to be installed before executing this script. #
+########################################################################################
+
+# constants
+
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+ELIOT_REPO="https://gerrit.akraino.org/r/eliot"
+
+show_help()
+{
+ echo "The script helps in setting up the ELIOT Toplogy Infrastrucutre"
+ echo "The setup installs Docker, K8S Master and K8S worker nodes in "
+ echo "ELIOT Manager and ELIOT Workder Nodes respectively "
+ echo "After completion of script execution execute command: "
+ echo "kubectl get nodes to check whether the connection between "
+ echo "ELIOT Manager and ELIOT Nodes are established"
+ echo ""
+ echo "Nodelist file should have the details of Worker Nodes in the format of:"
+ echo "EliotNodeUserName|EliotNodeIP|EliotNodePasswor"
+ echo "Each line should have detail of one ELIOT Node only"
+}
+
+# Setting up ELIOT Manager Node.
+# Installing Docker, K8S and Initializing K8S Master
+setup_k8smaster()
+{
+ #set -o xtrace
+ sudo rm -rf ~/.kube
+ source common.sh | tee eliotcommon.log
+ source k8smaster.sh | tee kubeadm.log
+ # Setup ELIOT Node
+ setup_k8sworkers
+}
+
+setup_k8sworkers()
+{
+ set -o xtrace
+
+ # Install Docker on ELIOT Node
+ SETUP_WORKER_COMMON="sudo rm -rf ~/eliot &&\
+ git clone ${ELIOT_REPO} &&\
+ cd eliot/scripts/ && source common.sh"
+ #SETUP_WORKER_COMMON="cd eliot/scripts/ && source common.sh"
+ SETUP_WORKER="cd eliot/scripts/ && source k8sworker.sh"
+
+ KUBEADM_TOKEN=$(kubeadm token create --print-join-command)
+ KUBEADM_JOIN="sudo ${KUBEADM_TOKEN}"
+
+ # Read all the Worker Node details from nodelist file.
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN} < /dev/null
+ done < nodelist > /dev/null 2>&1
+
+}
+
+setup_k8smaster_centos()
+{
+ set -o xtrace
+ sudo rm -rf ~/.kube
+ source common_centos.sh | tee eliotcommon_centos.log
+ source k8smaster_centos.sh | tee kubeadm_centos.log
+
+ # Setup ELIOT Node
+ setup_k8sworkers_centos
+
+ kubectl apply -f cni/calico/rbac.yaml
+ kubectl apply -f cni/calico/calico.yaml
+
+}
+
+
+setup_k8sworkers_centos()
+{
+ set -o xtrace
+ # Install Docker on ELIOT Node
+
+ SETUP_WORKER_COMMON_CENTOS="sudo rm -rf ~/eliot &&\
+ git clone ${ELIOT_REPO} &&\
+ cd eliot/scripts/ && source common_centos.sh"
+
+ # SETUP_WORKER_COMMON_CENTOS="cd /root/eliot/scripts/ && source common_centos.sh"
+
+ KUBEADM_TOKEN=$(sudo kubeadm token create --print-join-command)
+ KUBEADM_JOIN_CENTOS="sudo ${KUBEADM_TOKEN}"
+ # Read all the Worker Node details from nodelist file.
+ while read line
+ do
+ nodeinfo="${line}" < /dev/null 2>&1
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1) < /dev/null
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2) < /dev/null
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3) < /dev/null
+ sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON_CENTOS} < /dev/null
+ sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN_CENTOS} < /dev/null
+ done < nodelist > /dev/null 2>&1
+
+}
+
+# verify kubernetes setup by deploying nginx server.
+
+verify_k8s_status(){
+ set -o xtrace
+ source verifyk8s.sh | tee verifyk8s.log
+}
+
+# install_edgex method removed
+
+install_cadvisor_edge(){
+ set -o xtrace
+ SETUP_CADVISOR_ATEDGE="cd eliot/scripts/ && source cadvisorsetup.sh"
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_CADVISOR_ATEDGE} < /dev/null
+ done < nodelist > /dev/null 2>&1
+}
+
+install_prometheus(){
+set -o xtrace
+source prometheus.sh | tee install_prometheus.log
+}
+
+# Start
+#
+
+if [ "$1" == "--help" ] || [ "$1" == "-h" ];
+then
+ show_help
+ exit 0
+fi
+
+if [[ $OSPLATFORM = *CentOS* ]]; then
+ setup_k8smaster_centos
+else
+ setup_k8smaster
+fi
+
+sleep 20
+verify_k8s_status
+install_cadvisor_edge
+sleep 10
+install_prometheus
+sleep 5
+sudo docker ps | grep prometheus
+
+# install_edgex removed
+
+sleep 20
+
+# Removing the taint from master node
+kubectl taint nodes --all node-role.kubernetes.io/master- || true
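+# Without this, the master carries the node-role.kubernetes.io/master:NoSchedule
+# taint and regular pods are not scheduled on it; "|| true" keeps the script
+# going if the taint has already been removed.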
+
+echo "ELIOT Setup execution is Completed..."
+
--- /dev/null
+# edge node user name
+EDGENODEUSR=""
+
+# edge node ip
+EDGENODEIP=""
+
+# edge node password
+EDGENODEPASSWORD=""
+
+# master node user name
+MASTERNODEUSR=""
+
+# master node ip
+MASTERNODEIP=""
+
+# master node password
+MASTERNODEPASSWORD=""
+
+# eliot source code path including eliot folder
+PATH_OF_ELIOTFOLDER=""
+
+# home path of edge node
+HOME_EDGENODE=""
+
+# edgenode id for kubeedge configuration
+EDGENODEID=""
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#Constants
+KUBEEDGE_SRC="$GOPATH/src/github.com/kubeedge/kubeedge"
+KUBEEDGE_BIN="$GOPATH/src/github.com/kubeedge/kubeedge/keadm"
+# VERIFY_K8S is set after config_kubeedge is sourced, since PATH_OF_ELIOTFOLDER
+# comes from that file (see below).
+
+{ set +x; } > /dev/null 2>&1
+
+if [ -n "$1" ]; then
+
+if [ "$1" != "--help" ]; then
+ echo ""
+ echo "Usage of the command is wrong.. Please type ./kubeedge_setup.sh --help for more details"
+ echo ""
+ exit 0
+fi
+
+fi
+
+if [ "$1" == "--help" ]; then
+ echo ""
+ echo "This script will setup the kubeedge installation on Eliot master and Eliot edge"
+ echo "Before Executing this, add Eliot master and Eliot edge details in config_kubeedge file"
+ echo ""
+ exit 0
+fi
+
+# take_keedge downloads the kubeedge source code and builds keadm on the master;
+# the same steps run on the edge node via common_steps below.
+
+take_keedge(){
+
+ source ~/.profile
+ git clone https://github.com/kubeedge/kubeedge.git \
+ $KUBEEDGE_SRC
+ cd $KUBEEDGE_BIN
+ make
+}
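+# Note: "make" above builds the keadm binary inside $KUBEEDGE_BIN; keadm is the
+# tool used by the init (master) and join (edge) steps below.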
+
+source config_kubeedge > /dev/null 2>&1
+VERIFY_K8S="$PATH_OF_ELIOTFOLDER/scripts/verifyk8s.sh"
+
+common_steps="echo $GOPATH && \
+git clone https://github.com/kubeedge/kubeedge.git $KUBEEDGE_SRC && \
+source ~/.profile && \
+cd $GOPATH/src && \
+sudo chmod -R 777 github.com && \
+cd $KUBEEDGE_BIN && \
+make"
+
+edge_start="cd $KUBEEDGE_BIN && \
+sudo chmod +x keadm && \
+sudo ./keadm join --edgecontrollerip=$MASTERNODEIP --edgenodeid=$EDGENODEID \
+--k8sserverip=$MASTERNODEIP:8080"
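+# edge_start registers the edge node with the edgecontroller on the master and
+# points it at the K8s API, which is assumed to be served insecurely on port 8080.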
+
+# Initialisation of ELIOT master with kubeedge
+
+execute_keedge_controller(){
+ cd $KUBEEDGE_BIN
+ sudo chmod +x keadm
+ sudo ./keadm init
+}
+
+# Initialisation of Eliot edge with kubeedge
+
+exec_edge(){
+
+ cd $PATH_OF_ELIOTFOLDER/scripts/src
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp $PATH_OF_ELIOTFOLDER/scripts/src/config_kubeedge \
+ ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+ sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
+ source config_kubeedge
+
+ source config_kubeedge > /dev/null 2>&1
+ sshpass -p ${EDGENODEPASSWORD} \
+ ssh ${EDGENODEUSR}@${EDGENODEIP} ${common_steps}
+
+ echo "After cloning the code in ELIOT edge node"
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp /etc/kubeedge/certs.tgz ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ ssh ${EDGENODEUSR}@${EDGENODEIP} \
+ sudo tar -xvzf $HOME/certs.tgz --directory /etc/kubeedge
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ ssh ${EDGENODEUSR}@${EDGENODEIP} ${edge_start}
+}
+
+# start
+
+source config_kubeedge > /dev/null 2>&1
+
+take_keedge
+
+execute_keedge_controller
+
+exec_edge > /dev/null 2>&1
+
+sleep 10
+sudo kubectl get nodes
+
+if [ "$(id -u)" = 0 ]; then
+ echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+tee -a "${HOME}/.profile"
+ source "${HOME}/.profile"
+else
+ mkdir -p "${HOME}/.kube"
+ sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+ sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+fi
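+# Standard kubeadm post-init handling: root exports KUBECONFIG pointing at
+# admin.conf, while a non-root user gets its own copy in ~/.kube/config.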
+
+chmod +x $VERIFY_K8S
+source $VERIFY_K8S
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+NGINXDEP=~/testk8s-nginx.yaml
+
+cat <<EOF > "${NGINXDEP}"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx-deployment
+ labels:
+ app: nginx
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.15.12
+ ports:
+ - containerPort: 80
+ hostPort: 80
+EOF
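+# The deployment binds containerPort 80 to hostPort 80, so nginx is reachable
+# directly on port 80 of whichever node the pod lands on.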
+
+#check if nginx is already deployed
+if ! kubectl get pods | grep nginx; then
+ kubectl create -f "${NGINXDEP}"
+fi
+
+#To check whether the deployment is successful
+retry=10
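+# Poll every 10 seconds, up to 10 attempts, until "kubectl get pods" shows two
+# matching lines: the header (STATUS) plus one pod in the Running state.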
+while [ $retry -gt 0 ]
+do
+ if [ 2 == "$(kubectl get pods | grep -c -e STATUS -e Running)" ]; then
+ break
+ fi
+ ((retry-=1))
+ sleep 10
+done
+[ $retry -gt 0 ] || exit 1