--- /dev/null
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+/*
+Package cmd
+Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "elcli/cmd/util"
+)
+
+// Help and example text shown by `elcli clean`.
+// FIX: the bottom-right corner of the description box was '|' instead of '+'.
+var (
+	elcliCleanCmdLongDescription =
+	`
+	+-------------------------------------------------+
+	| To complete cleanup execute this command. The |
+	| command will remove all the configurations, |
+	| clear up ports used by ELIOT cluster & uninstall|
+	| all software. |
+	+-------------------------------------------------+
+	`
+	elcliCleanExample = `
+	+-------------------------------------------------+
+	| elcli clean |
+	+-------------------------------------------------+
+	`
+)
+
+// cleanCmd represents the clean command. It tears down the ELIOT cluster
+// via util.EliotClean (removes configuration, frees ports, uninstalls
+// software — see the long description).
+var cleanCmd = &cobra.Command{
+	Use:     "clean",
+	Short:   "ELIOT Cluster Uninstall",
+	Long:    elcliCleanCmdLongDescription,
+	Example: elcliCleanExample,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		fmt.Println("clean called")
+		// FIX: fail fast before querying the OS version; the original also
+		// passed a %s verb to Println, which performs no formatting.
+		if err := util.EliotClean(); err != nil {
+			return err
+		}
+		fmt.Printf("Print value of GetOSVersion %s\n", util.GetOSVersion())
+		return nil
+	},
+}
+
+// init registers the clean sub-command on the root elcli command.
+func init() {
+ rootCmd.AddCommand(cleanCmd)
+}
--- /dev/null
+/*
+Copyright 2019 The ELIOT Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Default tool versions plus the string names used for CLI flags / option
+// keys (e.g. "docker-version" is the --docker-version flag name).
+const (
+ //DefaultDockerVersion is the current default version of Docker
+ DefaultDockerVersion = "18.06.0"
+
+ //DefaultK8SVersion is the current default version of K8S
+ DefaultK8SVersion = "1.14.1"
+
+ // K8SImageRepository sets the image repository of Kubernetes
+ K8SImageRepository = "image-repository"
+
+ //K8SPodNetworkCidr sets pod network cidr of Kubernetes
+ K8SPodNetworkCidr = "pod-network-cidr"
+
+ //DockerVersion sets the version of Docker to be used
+ DockerVersion = "docker-version"
+
+ //KubernetesVersion sets the version of Kubernetes to be used
+ KubernetesVersion = "kubernetes-version"
+
+ //K8SAPIServerIPPort sets the IP:Port of Kubernetes api-server
+ K8SAPIServerIPPort = "k8sserverip"
+
+ //EliotCloudNodeIP sets the IP of the cloud component
+ //NOTE(review): comment originally said "KubeEdge" — confirm which component this addresses.
+ EliotCloudNodeIP = "eliotcloudnodeip"
+
+ //EliotEdgeNodeID Node unique identification string
+ EliotEdgeNodeID = "eliotedgenodeid"
+)
\ No newline at end of file
--- /dev/null
+/* Copyright 2019 The ELIOT Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+// Package-level values shared across elcli commands.
+var (
+ //Setup having option. Holds the chosen setup option value.
+ Setup string
+ //Masters list of master nodes.
+ Masters []string
+ //Nodes list of edge nodes.
+ Nodes []string
+)
+
+// InitOptions Structure holding the tool versions and Kubernetes settings
+// used when initialising a node.
+type InitOptions struct {
+ KubernetesVersion string
+ DockerVersion string
+ K8SImageRepository string
+ K8SPodNetworkCidr string
+}
+
+//JoinOptions has the cloud/edge join information filled by the CLI.
+//NOTE(review): comment mentioned "kubeedge" — confirm this project's terminology.
+type JoinOptions struct {
+ InitOptions
+ CertPath string
+ CloudCoreIP string
+ K8SAPIServerIPPort string
+ EdgeNodeID string
+}
+
+//InstallState enum set used for verifying a tool version is installed in host
+type InstallState uint8
+
+//Different possible enum values for type InstallState
+const (
+ NewInstallRequired InstallState = iota
+ AlreadySameVersionExist
+ DefVerInstallRequired
+ VersionNAInRepo
+)
+
+//ModuleRunning is defined to know the running status of KubeEdge components
+//NOTE(review): naming inherited from KubeEdge — confirm it maps to ELIOT components.
+type ModuleRunning uint8
+
+//Different possible values for ModuleRunning type
+const (
+ NoneRunning ModuleRunning = iota
+ KubeEdgeCloudRunning
+ KubeEdgeEdgeRunning
+)
+
+//ToolsInstaller interface for tools with install and teardown methods.
+type ToolsInstaller interface {
+ InstallTools() error
+ TearDown() error
+}
+
+//OSTypeInstaller interface for methods to be executed over a specified OS
+//distribution type (e.g. CentOS, Ubuntu). Implementations carry the tool
+//versions set via the Set* methods.
+//FIX: the closing brace was mis-indented (not gofmt-clean).
+type OSTypeInstaller interface {
+	IsToolVerInRepo(string, string) (bool, error)
+	IsDockerInstalled(string) (InstallState, error)
+	InstallDocker() error
+	IsK8SComponentInstalled(string, string) (InstallState, error)
+	InstallK8S() error
+	StartK8Scluster() error
+	SetDockerVersion(string)
+	SetK8SVersionAndIsNodeFlag(version string, flag bool)
+	SetK8SImageRepoAndPodNetworkCidr(string, string)
+}
+
+//FlagData stores value and default value of the flags used in this command.
+//Val and DefVal are interface{} so string/bool/int flags can share the type;
+//callers type-assert to the concrete flag type.
+type FlagData struct {
+ Val interface{}
+ DefVal interface{}
+}
--- /dev/null
+//
+// Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+
+
+ "github.com/spf13/cobra"
+
+ "elcli/cmd/util"
+)
+
+var (
+	// elcliSetupCmdDescription is the long help text for `elcli init`.
+	// (Declaration reformatted to gofmt style; string content unchanged.)
+	elcliSetupCmdDescription = `
+ELIOT init command is for setting up the ELIOT Cluster.
+The command has options to setup the complete setup which includes
+ELIOT Manager and ELIOT Edge Nodes and to setup only ELIOT Manager
+or ELIOT Edge Node. The command invokes setup.sh script which handles
+the complete setup.
+
+The Details of ELIOT Edge Nodes must be present in [nodelist] file.
+`
+)
+// initCmd represents the init command. It drives setup.sh-based cluster
+// setup via util.EliotSetupAll / util.EliotSetupMaster depending on the
+// --setup flag.
+var initCmd = &cobra.Command{
+	Use:   "init",
+	Short: "Setup ELIOT Cluster !!",
+	Long:  elcliSetupCmdDescription,
+	// PreRunE checks whether a Kubernetes cluster is already running on
+	// this node and aborts the operation if so.
+	PreRunE: func(cmd *cobra.Command, args []string) error {
+		isELIOTClusterRunning, err := util.IsK8SClusterRunning()
+		if err != nil {
+			return err
+		}
+		if isELIOTClusterRunning {
+			return fmt.Errorf("Kubernetes Cluster is running in the Node. Clean up the environment and then setup the Cluster")
+		}
+		return nil
+	},
+	RunE: func(cmd *cobra.Command, args []string) error {
+		fmt.Println("init called")
+		setupflagoption := cmd.Flag("setup").Value.String()
+
+		switch setupflagoption {
+		case "all":
+			// FIX: the original had a fmt.Println *after* `return err`,
+			// which was unreachable dead code; it has been removed.
+			return util.EliotSetupAll()
+		case "master":
+			fmt.Println("Its eliot setup Master")
+			return util.EliotSetupMaster()
+		default:
+			fmt.Println("Provide option for flag [--setup :- all | master] or [-s :- all | master]")
+		}
+		return nil
+	},
+}
+
+// init registers the init sub-command and its --setup/-s flag
+// (accepted values: "all" | "master"; default "all").
+func init() {
+	rootCmd.AddCommand(initCmd)
+	initCmd.Flags().StringP("setup", "s", "all", "Eliot Topology setup options")
+}
--- /dev/null
+/*
+Package cmd
+Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+ types "elcli/cmd/common"
+ "elcli/cmd/util"
+)
+
+// joinOptions holds the tool-version options for `elcli join`. It is
+// re-initialised with defaults in RunE via newJoinOptions().
+var joinOptions = &types.JoinOptions{}
+
+// joinCmd represents the join command.
+// FIX: replaced the cobra scaffolding placeholder help text ("A brief
+// description of your command") with a real description, and removed a
+// dead commented-out line.
+var joinCmd = &cobra.Command{
+	Use:   "join",
+	Short: "Join this node to the ELIOT cluster",
+	Long: `Installs the required tool versions (Docker, Kubernetes) on this host
+and joins it to the ELIOT cluster. Versions can be overridden with the
+--docker-version and --kubernetes-version flags.`,
+
+	RunE: func(cmd *cobra.Command, args []string) error {
+		fmt.Println("join called")
+
+		tools := make(map[string]types.ToolsInstaller)
+		flagVals := make(map[string]types.FlagData)
+
+		// Record the value and default of every flag defined on this command.
+		checkFlags := func(f *pflag.Flag) {
+			util.AddToolVals(f, flagVals)
+		}
+		cmd.Flags().VisitAll(checkFlags)
+
+		// Reset options to their defaults before merging flag data.
+		joinOptions = newJoinOptions()
+
+		Add2ToolsList(tools, flagVals, joinOptions)
+		return ExecuteTool(tools)
+	},
+}
+
+// init registers the join sub-command and its version-override flags.
+// NOTE(review): at init time joinOptions fields are still empty strings,
+// so each flag's default and NoOptDefVal are "" — confirm this is intended
+// (defaults are later filled in RunE via newJoinOptions()).
+func init() {
+
+ // Join Command added as sub-command for main command : elcli
+ rootCmd.AddCommand(joinCmd)
+
+ joinCmd.Flags().StringVar(&joinOptions.DockerVersion, types.DockerVersion, joinOptions.DockerVersion,
+ "Use this key to download and use the required Docker version")
+ joinCmd.Flags().Lookup(types.DockerVersion).NoOptDefVal = joinOptions.DockerVersion
+
+ joinCmd.Flags().StringVar(&joinOptions.KubernetesVersion, types.KubernetesVersion, joinOptions.KubernetesVersion,
+ "Use this key to download and use the required Kubernetes version")
+ joinCmd.Flags().Lookup(types.KubernetesVersion).NoOptDefVal = joinOptions.KubernetesVersion
+
+ joinCmd.Flags().StringVar(&joinOptions.K8SImageRepository, types.K8SImageRepository, joinOptions.K8SImageRepository,
+ "Use this key to set the Kubernetes docker image repository")
+ joinCmd.Flags().Lookup(types.K8SImageRepository).NoOptDefVal = joinOptions.K8SImageRepository
+
+}
+
+
+//newJoinOptions returns JoinOptions pre-filled with the default Docker and
+//Kubernetes versions.
+//FIX: removed a leftover debug Println ("Inside newJointOptions...") and a
+//dead commented-out line.
+func newJoinOptions() *types.JoinOptions {
+	opts := &types.JoinOptions{}
+	opts.InitOptions = types.InitOptions{DockerVersion: types.DefaultDockerVersion, KubernetesVersion: types.DefaultK8SVersion}
+	return opts
+}
+
+
+//Add2ToolsList Reads the flagData (containing val and default val) and join options to fill the list of tools.
+func Add2ToolsList(toolList map[string]types.ToolsInstaller, flagData map[string]types.FlagData, joinOptions *types.JoinOptions) {
+
+ var k8sVer, dockerVer string
+ /*var k8sImageRepo string
+
+ flgData, ok := flagData[types.K8SImageRepository]
+ if ok {
+ k8sImageRepo = util.CheckIfAvailable(flgData.Val.(string), flgData.DefVal.(string))
+ } else {
+ k8sImageRepo = joinOptions.K8SImageRepository
+ }
+
+ */
+
+ //toolList["EliotEdge"] = &util.KubeEdgeInstTool{Common: util.Common{ToolVersion: kubeVer}, K8SApiServerIP: joinOptions.K8SAPIServerIPPort,
+ // CloudCoreIP: joinOptions.CloudCoreIP, EdgeNodeID: joinOptions.EdgeNodeID}
+
+ flgData, ok := flagData[types.DockerVersion]
+ if ok {
+ dockerVer = util.CheckIfAvailable(flgData.Val.(string), flgData.DefVal.(string))
+ } else {
+ dockerVer = joinOptions.DockerVersion
+ }
+ toolList["Docker"] = &util.DockerInstTool{Common: util.Common{ToolVersion: dockerVer}, DefaultToolVer: flgData.DefVal.(string)}
+
+
+ flgData, ok = flagData[types.KubernetesVersion]
+ if ok {
+ k8sVer = util.CheckIfAvailable(flgData.Val.(string), flgData.DefVal.(string))
+ } else {
+ k8sVer = joinOptions.KubernetesVersion
+ }
+ toolList["Kubernetes"] = &util.K8SInstTool{Common: util.Common{ToolVersion: k8sVer}, IsEdgeNode: false, DefaultToolVer: flgData.DefVal.(string)}
+
+
+}
+
+//ExecuteTool runs the installation for each pre-requisite tool and then
+//installs/starts the ELIOT edge node itself.
+func ExecuteTool(toolList map[string]types.ToolsInstaller) error {
+
+	//Install all the required pre-requisite tools first.
+	for name, tool := range toolList {
+		if name != "EliotEdge" {
+			err := tool.InstallTools()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	//Install and start the ELIOT edge node, if one was registered.
+	//FIX: the original looked up "ElioteEdge" (typo) — never a key of
+	//toolList — so calling InstallTools on the nil interface would panic.
+	if tool, ok := toolList["EliotEdge"]; ok {
+		return tool.InstallTools()
+	}
+	return nil
+}
\ No newline at end of file
--- /dev/null
+/*
+Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "elcli/cmd/util"
+)
+
+var (
+
+ elcliResetCmdLongDescription = `
++-----------------------------------------------------------+
+| To Reset the ELIOT Cluster. |
+| |
++-----------------------------------------------------------+
+| RESET: It will reset the setting and the kubernetes |
+| cluster, underying softwares of ELIOT platform will not be|
+| still installed. |
++-----------------------------------------------------------+
+`
+)
+
+// resetCmd represents the reset command. It resets the Kubernetes cluster
+// settings via util.EliotReset; the underlying software stays installed.
+var resetCmd = &cobra.Command{
+	Use:   "reset",
+	Short: "Reset ELIOT Cluster!!",
+	Long:  elcliResetCmdLongDescription,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		fmt.Println("reset called")
+		// FIX: only query the OS version after the reset has succeeded;
+		// the original called GetOSVersion before checking the error.
+		if err := util.EliotReset(); err != nil {
+			return err
+		}
+		fmt.Println("Print value of GetOSVersion", util.GetOSVersion())
+		return nil
+	},
+}
+
+// init registers the reset sub-command on the root elcli command.
+func init() {
+ rootCmd.AddCommand(resetCmd)
+}
--- /dev/null
+/*
+Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "os"
+ "github.com/spf13/cobra"
+
+ homedir "github.com/mitchellh/go-homedir"
+ "github.com/spf13/viper"
+
+)
+
+
+// cfgFile is the path to an explicit config file, consumed by initConfig.
+// NOTE(review): it is never bound to a CLI flag in this file, so it stays
+// empty — confirm whether a --config flag was intended.
+var cfgFile string
+
+// Root-command help and example text.
+// FIX: user-facing typo "Bootsrap" -> "Bootstrap".
+var (
+	elcliLongDescription = `
+	+----------------------------------------------------------+
+	| ELCLI |
+	| Command Line Interface to Bootstrap ELIOT Cluster |
+	+----------------------------------------------------------+
+
+	ELCLI Command is use to setup the ELIOT Cluster.
+	It installs the ELIOT Manager and the ELIOT Nodes.
+	ELIOT Manager is the core-controller and the ELIOT Nodes are
+	the edge nodes which acts as IOT Gateway or uCPE, where the
+	application PODS will be running.
+	`
+	elcliExample = `
+	+-----------------------------------------------------------+
+	|To setup up the ELIOT Cluster the elcli init command has to|
+	|be executed in the ELIOT Manager Node. |
+	| |
+	|Example : |
+	|elcli init |
+	| |
+	+-----------------------------------------------------------+
+
+	`
+)
+
+
+// rootCmd represents the base command when called without any subcommands.
+// FIX: user-facing typo "Bootstarp" -> "Bootstrap" in the Short text.
+var rootCmd = &cobra.Command{
+	Use:     "elcli",
+	Short:   "elcli : Bootstrap ELIOT Cluster",
+	Long:    elcliLongDescription,
+	Example: elcliExample,
+}
+
+// Execute runs the root command, dispatching to whichever sub-command was
+// requested. Invoked once from main.main(); on failure the error is printed
+// and the process exits with status 1.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+}
+
+// init schedules initConfig to run before any command executes, so the
+// viper configuration is loaded first.
+func init() {
+ cobra.OnInitialize(initConfig)
+
+}
+
+
+// initConfig reads in config file and ENV variables if set.
+// Precedence: an explicit cfgFile wins; otherwise ~/.elcli is searched.
+// Missing config is not an error — the command proceeds without one.
+func initConfig() {
+ if cfgFile != "" {
+ // Use config file from the flag.
+ // NOTE(review): cfgFile is never set by any flag in this file, so this
+ // branch currently cannot trigger — confirm intent.
+ viper.SetConfigFile(cfgFile)
+ } else {
+ // Find home directory.
+ home, err := homedir.Dir()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+
+ // Search config in home directory with name ".elcli" (without extension).
+ viper.AddConfigPath(home)
+ viper.SetConfigName(".elcli")
+ }
+
+ viper.AutomaticEnv() // read in environment variables that match
+
+ // If a config file is found, read it in.
+ if err := viper.ReadInConfig(); err == nil {
+ fmt.Println("Using config file:", viper.ConfigFileUsed())
+ }
+}
+
--- /dev/null
+/*
+Copyright 2019 The Kubeedge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ "os/exec"
+
+ types "elcli/cmd/common"
+
+)
+
+//CentOS struct objects shall have information of the tools version to be installed
+//on Hosts having CentOS (the original comment said "Ubuntu OS" — fixed).
+//It implements OSTypeInstaller interface
+type CentOS struct {
+ DockerVersion string
+ KubernetesVersion string
+ IsEdgeNode bool //True - Edgenode False - Cloudnode
+ K8SImageRepository string
+ K8SPodNetworkCidr string
+}
+
+//SetDockerVersion sets the Docker version to install on this instance.
+func (c *CentOS) SetDockerVersion(version string) {
+ c.DockerVersion = version
+}
+
+//SetK8SVersionAndIsNodeFlag sets the K8S version for the objects instance.
+//It also sets whether this host shall act as an edge node (flag == true).
+func (c *CentOS) SetK8SVersionAndIsNodeFlag(version string, flag bool) {
+ c.KubernetesVersion = version
+ c.IsEdgeNode = flag
+}
+
+//SetK8SImageRepoAndPodNetworkCidr sets the K8S image repository and the pod
+//network CIDR on this instance.
+func (c *CentOS) SetK8SImageRepoAndPodNetworkCidr(repo, cidr string) {
+ c.K8SImageRepository = repo
+ c.K8SPodNetworkCidr = cidr
+}
+
+//IsDockerInstalled checks if docker is installed in the host or not.
+//NOTE(review): currently a stub — it always reports VersionNAInRepo; the
+//yum probe sketched in the comment below is not yet implemented.
+func (c *CentOS) IsDockerInstalled(string) (types.InstallState, error) {
+ //yum list installed | grep docker-ce | awk '{print $2}' | cut -d'-' -f 1
+ //18.06.1.ce
+
+ return types.VersionNAInRepo, nil
+
+}
+
+//InstallDocker will install the specified docker in the host.
+//NOTE(review): currently a stub — it only prints and returns nil; the yum
+//recipe below documents the intended implementation.
+func (c *CentOS) InstallDocker() error {
+ fmt.Println("InstallDocker called")
+ // yum install -y yum-utils
+ // yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
+ // yum makecache
+ // yum list --showduplicates 'docker-ce' | grep '17.06.0' | head -1 | awk '{print $2}'
+ // yum install -y docker-ce-17.06.0.ce-1.el7.centos
+ // [root@localhost ~]# systemctl start docker
+ // [root@localhost ~]# ---> Always restart systemctl restart docker
+ // [root@localhost ~]#
+ // IF downgrade yum downgrade -y docker-ce-17.06.0.ce-1.el7.centos
+ // Check always for version, if it is a downgrade or upgrade
+
+ return nil
+}
+
+//IsToolVerInRepo checks if the tool mentioned is available in OS repo or not.
+//NOTE(review): currently a stub — always returns (false, nil).
+func (c *CentOS) IsToolVerInRepo(toolName, version string) (bool, error) {
+ //yum --cacheonly list | grep openssl
+ //For K8S, dont check in repo, just install
+ fmt.Println("IsToolVerInRepo called")
+ return false, nil
+}
+
+//InstallMQTT checks if MQTT is already installed and running, if not then install it from OS repo
+//Information is used from https://www.digitalocean.com/community/tutorials/how-to-install-and-secure-the-mosquitto-mqtt-messaging-broker-on-centos-7
+func (c *CentOS) InstallMQTT() error {
+
+ //yum -y install epel-release
+ cmd := &Command{Cmd: exec.Command("sh", "-c", "yum -y install epel-release")}
+ err := cmd.ExecuteCmdShowOutput()
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+ if err != nil || errout != "" {
+ return fmt.Errorf("%s", errout)
+ }
+ fmt.Println(stdout)
+
+ //yum -y install mosquitto
+ cmd = &Command{Cmd: exec.Command("sh", "-c", "yum -y install mosquitto")}
+ err = cmd.ExecuteCmdShowOutput()
+ stdout = cmd.GetStdOutput()
+ errout = cmd.GetStdErr()
+ if err != nil || errout != "" {
+ return fmt.Errorf("%s", errout)
+ }
+ fmt.Println(stdout)
+
+
+ //systemctl start mosquitto
+ cmd = &Command{Cmd: exec.Command("sh", "-c", "systemctl start mosquitto")}
+ cmd.ExecuteCommand()
+ stdout = cmd.GetStdOutput()
+ errout = cmd.GetStdErr()
+ if errout != "" {
+ return fmt.Errorf("%s", errout)
+ }
+ fmt.Println(stdout)
+
+ //systemctl enable mosquitto
+ cmd = &Command{Cmd: exec.Command("sh", "-c", "systemctl enable mosquitto")}
+ cmd.ExecuteCommand()
+ stdout = cmd.GetStdOutput()
+ errout = cmd.GetStdErr()
+ if errout != "" {
+ return fmt.Errorf("%s", errout)
+ }
+ fmt.Println(stdout)
+
+
+ return nil
+}
+
+//IsK8SComponentInstalled checks if said K8S version is already installed in the host
+func (c *CentOS) IsK8SComponentInstalled(component, defVersion string) (types.InstallState, error) {
+ // [root@localhost ~]# yum list installed | grep kubeadm | awk '{print $2}' | cut -d'-' -f 1
+ // 1.14.1
+ // [root@localhost ~]#
+ // [root@localhost ~]# yum list installed | grep kubeadm
+ // kubeadm.x86_64 1.14.1-0 @kubernetes
+ // [root@localhost ~]#
+
+ return types.VersionNAInRepo, nil
+}
+
+//InstallK8S will install kubeadm, kudectl and kubelet for the cloud node
+func (c *CentOS) InstallK8S() error {
+ fmt.Println("InstallK8S called")
+ //Follow https://kubernetes.io/docs/setup/independent/install-kubeadm/
+ return nil
+}
+
+//StartK8Scluster will do "kubeadm init" and cluster will be started
+func (c *CentOS) StartK8Scluster() error {
+ return nil
+}
\ No newline at end of file
--- /dev/null
+package util
+
+
+import (
+ "fmt"
+ "os/exec"
+)
+// EliotClean function to reset the ELiot Topology
+func EliotClean() error {
+ fmt.Println("Inside EliotClean Function")
+
+ cdEliotScripts := fmt.Sprintf("cd ~/eliot/scripts/ && ls -l")
+ shCleanEliotTopology := fmt.Sprintf("cd ~/eliot/scripts/ && bash kubernetes_cleanup.sh")
+ cmd := &Command{Cmd: exec.Command("bash", "-c", cdEliotScripts)}
+ cmd.ExecuteCommand()
+
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+
+ if errout != "" {
+ return fmt.Errorf("Error Output .. %s", errout)
+ }
+ fmt.Println("Output is .... ", stdout)
+
+ stdout, err := runCommandWithShell(shCleanEliotTopology)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+
+ return nil
+}
+
+// EliotReset function to Reset the ELIOT Cluster.
+func EliotReset() error {
+ fmt.Println("Inside EliotReset Function")
+
+ cdEliotScripts := fmt.Sprintf("cd ~/eliot/scripts/ && ls -l")
+ shResetEliotTopology := fmt.Sprintf("cd ~/eliot/scripts/ && bash kubernetes_reset.sh")
+ cmd := &Command{Cmd: exec.Command("sh", "-c", cdEliotScripts)}
+ cmd.ExecuteCommand()
+
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+
+ if errout != "" {
+ return fmt.Errorf("Error Output .. %s", errout)
+ }
+ fmt.Println("Output is .... \n ", stdout)
+ return nil
+
+ stdout, err := runCommandWithShell(shResetEliotTopology)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+
+ return nil
+}
\ No newline at end of file
--- /dev/null
+/*
+Copyright 2019 The ELIOT Team .
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strconv"
	"strings"
	"sync"

	"github.com/spf13/pflag"

	types "elcli/cmd/common"
)
+
//Constants used by installers
const (
	// Host OS identifiers, compared against `. /etc/os-release && echo $ID`.
	UbuntuOSType = "ubuntu"
	CentOSType = "centos"

	// Docker apt repository base URL and the Ubuntu package prerequisites
	// installed before docker-ce.
	DefaultDownloadURL = "https://download.docker.com"
	DockerPreqReqList = "apt-transport-https ca-certificates curl gnupg-agent software-properties-common"

	// Kubernetes apt repository URL and the GPG key that signs it.
	KubernetesDownloadURL = "https://apt.kubernetes.io/"
	KubernetesGPGURL = "https://packages.cloud.google.com/apt/doc/apt-key.gpg"

	// Process name used to detect a running Kubernetes API server.
	KubeAPIServerName = "kube-apiserver"
)
+
+//AddToolVals gets the value and default values of each flags and collects them in temporary cache
+func AddToolVals(f *pflag.Flag, flagData map[string]types.FlagData) {
+ flagData[f.Name] = types.FlagData{Val: f.Value.String(), DefVal: f.DefValue}
+}
+
//CheckIfAvailable checks is val of a flag is empty then return the default value
func CheckIfAvailable(val, defval string) string {
	// An empty value means the flag was not supplied; fall back to its default.
	if len(val) == 0 {
		return defval
	}
	return val
}
+
//Common struct contains OS and Tool version properties and also embeds OS interface
type Common struct {
	types.OSTypeInstaller // concrete installer (UbuntuOS / CentOS), set via SetOSInterface
	OSVersion string // host OS identifier, e.g. "ubuntu" or "centos"
	ToolVersion string // requested version of the tool being installed
	KubeConfig string // path to the kubeconfig file
}

//SetOSInterface defines a method to set the implementation of the OS interface
func (co *Common) SetOSInterface(intf types.OSTypeInstaller) {
	co.OSTypeInstaller = intf
}
+
//Command defines commands to be executed and captures std out and std error
type Command struct {
	Cmd *exec.Cmd // the prepared command to run
	StdOut []byte // captured standard output
	StdErr []byte // captured standard error (or the Go error text on failure)
}
+
+//ExecuteCommand executes the command and captures the output in stdOut
+func (cm *Command) ExecuteCommand() {
+ var err error
+ cm.StdOut, err = cm.Cmd.Output()
+ if err != nil {
+ fmt.Println("Output failed: ", err)
+ cm.StdErr = []byte(err.Error())
+ }
+}
+
+//GetStdOutput gets StdOut field
+func (cm Command) GetStdOutput() string {
+ if len(cm.StdOut) != 0 {
+ return strings.TrimRight(string(cm.StdOut), "\n")
+ }
+ return ""
+}
+
+//GetStdErr gets StdErr field
+func (cm Command) GetStdErr() string {
+ if len(cm.StdErr) != 0 {
+ return strings.TrimRight(string(cm.StdErr), "\n")
+ }
+ return ""
+}
+
+//ExecuteCmdShowOutput captures both StdOut and StdErr after exec.cmd().
+//It helps in the commands where it takes some time for execution.
+func (cm Command) ExecuteCmdShowOutput() error {
+ var stdoutBuf, stderrBuf bytes.Buffer
+ stdoutIn, _ := cm.Cmd.StdoutPipe()
+ stderrIn, _ := cm.Cmd.StderrPipe()
+
+ var errStdout, errStderr error
+ stdout := io.MultiWriter(os.Stdout, &stdoutBuf)
+ stderr := io.MultiWriter(os.Stderr, &stderrBuf)
+ err := cm.Cmd.Start()
+ if err != nil {
+ return fmt.Errorf("failed to start '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error())
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ _, errStdout = io.Copy(stdout, stdoutIn)
+ wg.Done()
+ }()
+
+ _, errStderr = io.Copy(stderr, stderrIn)
+ wg.Wait()
+
+ err = cm.Cmd.Wait()
+ if err != nil {
+ return fmt.Errorf("failed to run '%s' because of error : %s", strings.Join(cm.Cmd.Args, " "), err.Error())
+ }
+ if errStdout != nil || errStderr != nil {
+ return fmt.Errorf("failed to capture stdout or stderr")
+ }
+
+ cm.StdOut, cm.StdErr = stdoutBuf.Bytes(), stderrBuf.Bytes()
+ return nil
+}
+
+//GetOSVersion gets the OS name
+func GetOSVersion() string {
+ c := &Command{Cmd: exec.Command("sh", "-c", ". /etc/os-release && echo $ID")}
+ c.ExecuteCommand()
+ return c.GetStdOutput()
+}
+
+//GetOSInterface helps in returning OS specific object which implements OSTypeInstaller interface.
+func GetOSInterface() types.OSTypeInstaller {
+
+ switch GetOSVersion() {
+ case UbuntuOSType:
+ return &UbuntuOS{}
+ case CentOSType:
+ return &CentOS{}
+ default:
+ panic("This OS version is currently un-supported by keadm")
+ }
+}
+
+// IsCloudCore identifies if the node is having cloudcore and kube-apiserver already running.
+// If so, then return true, else it can used as edge node and initialise it.
+func IsCloudCore() (types.ModuleRunning, error) {
+ //osType := GetOSInterface()
+
+ //If any of cloudcore or K8S API server is running, then we believe the node is cloud node
+
+ return types.NoneRunning, nil
+}
+
+//IsK8SClusterRunning check whether Kubernetes Master is running already on the server in which ELIOT Setup command is executed
+//Currently there is no check on the ELIOT Edge Nodes.
+func IsK8SClusterRunning() (bool, error) {
+ shK8SClusterRunning := fmt.Sprintf("ps aux | grep kube- | grep -v grep | wc -l")
+ cmd := &Command {Cmd : exec.Command ("sh" , "-c" ,shK8SClusterRunning)}
+ cmd.ExecuteCommand()
+ stdOut:= cmd.GetStdOutput()
+ errOut:= cmd.GetStdErr()
+
+ if errOut != "" {
+ return false, fmt.Errorf("%s", errOut)
+ }
+ if stdOut != "" {
+ return true, nil
+ }
+ return false,nil
+
+}
+
+// runCommandWithShell executes the given command with "sh -c".
+// It returns an error if the command outputs anything on the stderr.
+func runCommandWithShell(command string) (string, error) {
+ cmd := &Command{Cmd: exec.Command("sh", "-c", command)}
+ err := cmd.ExecuteCmdShowOutput()
+ if err != nil {
+ return "", err
+ }
+ errout := cmd.GetStdErr()
+ if errout != "" {
+ return "", fmt.Errorf("%s", errout)
+ }
+ return cmd.GetStdOutput(), nil
+}
--- /dev/null
+/*
+Copyright 2019 The Kubeedge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+
+ types "elcli/cmd/common"
+)
+
//DockerInstTool embedes Common struct and contains the default docker version
//It implements ToolsInstaller interface
type DockerInstTool struct {
	Common // shared OS/tool version state and the OS-specific installer
	DefaultToolVer string // docker version installed when the requested one is unavailable
}
+
+//InstallTools sets the OS interface, checks if docker installation is required or not.
+//If required then install the said version.
+func (d *DockerInstTool) InstallTools() error {
+ d.SetOSInterface(GetOSInterface())
+ d.SetDockerVersion(d.ToolVersion)
+
+ action, err := d.IsDockerInstalled(d.DefaultToolVer)
+ if err != nil {
+ return err
+ }
+ switch action {
+ case types.VersionNAInRepo:
+ return fmt.Errorf("Expected Docker version is not available in OS repo")
+ case types.AlreadySameVersionExist:
+ fmt.Println("Same version of docker already installed in this host")
+ return nil
+ case types.DefVerInstallRequired:
+ d.SetDockerVersion(d.DefaultToolVer)
+ fallthrough
+ case types.NewInstallRequired:
+ err := d.InstallDocker()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("Error in getting the docker version from host")
+ }
+
+ return nil
+}
+
+//TearDown shoud uninstall docker, but it is not required either for cloud or edge node.
+//It is defined so that DockerInstTool implements ToolsInstaller interface
+func (d *DockerInstTool) TearDown() error {
+ return nil
+}
--- /dev/null
+/*
+Copyright 2019 The Eliot Team.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+
+ types "elcli/cmd/common"
+)
+
+
+//K8SInstTool embedes Common struct and contains the default K8S version and
+//a flag depicting if host is an edge or cloud node
+//It implements ToolsInstaller interface
type K8SInstTool struct {
	Common // shared OS/tool version state and the OS-specific installer
	IsEdgeNode bool //True - Edgenode False - Cloudnode
	DefaultToolVer string // K8S version installed when the requested one is unavailable
}
+
+
+//InstallTools sets the OS interface, checks if K8S installation is required or not.
+//If required then install the said version.
+func (ks *K8SInstTool) InstallTools() error {
+ ks.SetOSInterface(GetOSInterface())
+ ks.SetK8SVersionAndIsNodeFlag(ks.ToolVersion, ks.IsEdgeNode)
+
+ component := "kubeadm"
+ if ks.IsEdgeNode == true {
+ component = "kubectl"
+ }
+ action, err := ks.IsK8SComponentInstalled(component, ks.DefaultToolVer)
+ if err != nil {
+ return err
+ }
+ switch action {
+ case types.VersionNAInRepo:
+ return fmt.Errorf("Expected %s version is not available in OS repo", component)
+ case types.AlreadySameVersionExist:
+ fmt.Printf("Same version of %s already installed in this host", component)
+ return nil
+ case types.DefVerInstallRequired:
+ ks.SetK8SVersionAndIsNodeFlag(ks.DefaultToolVer, ks.IsEdgeNode)
+ fallthrough
+ case types.NewInstallRequired:
+ err := ks.InstallK8S()
+ if err != nil {
+ return err
+ }
+ default:
+ return fmt.Errorf("Error in getting the %s version from host", component)
+ }
+ return nil
+}
+
+//TearDown shoud uninstall K8S, but it is not required either for cloud or edge node.
+//It is defined so that K8SInstTool implements ToolsInstaller interface
+func (ks *K8SInstTool) TearDown() error {
+ return nil
+}
\ No newline at end of file
--- /dev/null
+package util
+
+import (
+ "fmt"
+ "os/exec"
+ //"github.com/spf13/elcli/cmd/common"
+)
+
+// EliotSetupAll function to reset the ELiot Topology
+func EliotSetupAll() error {
+ fmt.Println("Inside EliotSetupAll Function")
+
+
+ strCdEliotScripts := fmt.Sprintf("cd ~/eliot/scripts/ && ls -l")
+ strSetupAll := fmt.Sprintf("cd ~/eliot/scripts/ && bash setup.sh")
+ cmd := &Command{Cmd: exec.Command("bash", "-c", strCdEliotScripts)}
+ cmd.ExecuteCommand()
+
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+ if errout != "" {
+ return fmt.Errorf("Error Output .. %s", errout)
+ }
+
+ fmt.Println("Output is .... ", stdout)
+
+ stdout, err := runCommandWithShell(strSetupAll)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+ return nil
+}
+
+//EliotSetupMaster Setup Method.
+func EliotSetupMaster() error {
+ fmt.Println("Inside EliotSetupMaster Function")
+
+ strCdEliotScripts := fmt.Sprintf("cd ~/eliot/scripts/ && ls -l")
+
+ cmd := &Command{Cmd: exec.Command("bash", "-c", strCdEliotScripts)}
+ cmd.ExecuteCommand()
+
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+ if errout != "" {
+ return fmt.Errorf("Error Output .. %s", errout)
+ }
+ fmt.Println("Output is .... ", stdout)
+
+ strSetupCommon := fmt.Sprintf("cd ~/eliot/scripts/ && bash common.sh")
+ stdout, err := runCommandWithShell(strSetupCommon)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+ fmt.Println("Output is .... ", stdout)
+
+ strSetupk8sMaster := fmt.Sprintf("cd ~/eliot/scripts/ && bash k8smaster.sh")
+ stdout, err = runCommandWithShell(strSetupk8sMaster)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+
+ return nil
+}
\ No newline at end of file
--- /dev/null
+/*
+Copyright 2019 The Kubeedge Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "fmt"
+ //"os"
+ "os/exec"
+ "strings"
+
+ //types "github.com/kubeedge/kubeedge/keadm/app/cmd/common"
+ types "elcli/cmd/common"
+)
+
// downloadRetryTimes bounds how many times a download may be retried.
const downloadRetryTimes int = 3

// Ubuntu releases
const (
	UbuntuXenial = "xenial" // Ubuntu 16.04
	UbuntuBionic = "bionic" // Ubuntu 18.04
)
+
//UbuntuOS struct objects shall have information of the tools version to be installed
//on Hosts having Ubuntu OS.
//It implements OSTypeInstaller interface
type UbuntuOS struct {
	DockerVersion string // docker-ce version to install
	KubernetesVersion string // kubeadm/kubelet/kubectl version to install
	KubeEdgeVersion string // KubeEdge release to install
	IsEdgeNode bool //True - Edgenode False - EliotCloudnode
	K8SImageRepository string // image repository passed to `kubeadm init`
	K8SPodNetworkCidr string // pod network CIDR passed to `kubeadm init`
}
+
+//SetDockerVersion sets the Docker version for the objects instance
+func (u *UbuntuOS) SetDockerVersion(version string) {
+ u.DockerVersion = version
+}
+
+//SetK8SVersionAndIsNodeFlag sets the K8S version for the objects instance
+//It also sets if this host shall act as edge node or not
+func (u *UbuntuOS) SetK8SVersionAndIsNodeFlag(version string, flag bool) {
+ u.KubernetesVersion = version
+ u.IsEdgeNode = flag
+}
+
+//SetK8SImageRepoAndPodNetworkCidr sets the K8S image Repository and pod network
+// cidr.
+func (u *UbuntuOS) SetK8SImageRepoAndPodNetworkCidr(repo, cidr string) {
+ u.K8SImageRepository = repo
+ u.K8SPodNetworkCidr = cidr
+}
+
+//SetKubeEdgeVersion sets the KubeEdge version for the objects instance
+func (u *UbuntuOS) SetKubeEdgeVersion(version string) {
+ u.KubeEdgeVersion = version
+}
+
+//IsToolVerInRepo checks if the tool mentioned in available in OS repo or not
+func (u *UbuntuOS) IsToolVerInRepo(toolName, version string) (bool, error) {
+ //Check if requested Docker or K8S components said version is available in OS repo or not
+
+ chkToolVer := fmt.Sprintf("apt-cache madison '%s' | grep -w %s | head -1 | awk '{$1=$1};1' | cut -d' ' -f 3", toolName, version)
+ cmd := &Command{Cmd: exec.Command("sh", "-c", chkToolVer)}
+ cmd.ExecuteCommand()
+ stdout := cmd.GetStdOutput()
+ errout := cmd.GetStdErr()
+
+ if errout != "" {
+ return false, fmt.Errorf("%s", errout)
+ }
+
+ if stdout != "" {
+ fmt.Println(toolName, stdout, "is available in OS repo")
+ return true, nil
+ }
+
+ fmt.Println(toolName, "version", version, "not found in OS repo")
+ return false, nil
+}
+
// addDockerRepositoryAndUpdate registers Docker's apt repository for this
// host's Ubuntu release and refreshes the package index so that docker-ce
// versions can be resolved by later apt-cache/apt-get calls.
func (u *UbuntuOS) addDockerRepositoryAndUpdate() error {
	//lsb_release -cs
	// The release codename (e.g. "xenial", "bionic") selects the repo suite.
	cmd := &Command{Cmd: exec.Command("sh", "-c", "lsb_release -cs")}
	cmd.ExecuteCommand()
	distVersion := cmd.GetStdOutput()
	if distVersion == "" {
		return fmt.Errorf("ubuntu dist version not available")
	}
	fmt.Println("Ubuntu distribution version is", distVersion)

	//'apt-get update'
	stdout, err := runCommandWithShell("apt-get update")
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	//"curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | apt-key add"
	//Get the GPG key
	curl := fmt.Sprintf("curl -fsSL \"%s/linux/%s/gpg\" | apt-key add", DefaultDownloadURL, UbuntuOSType)
	cmd = &Command{Cmd: exec.Command("sh", "-c", curl)}
	cmd.ExecuteCommand()
	curlOutput := cmd.GetStdOutput()
	// apt-key prints "OK" on success; empty output means the key was not added.
	if curlOutput == "" {
		return fmt.Errorf("not able add the apt key")
	}
	fmt.Println(curlOutput)

	//Add the repo in OS source.list
	aptRepo := fmt.Sprintf("deb [arch=$(dpkg --print-architecture)] %s/linux/%s %s stable", DefaultDownloadURL, UbuntuOSType, distVersion)
	updtRepo := fmt.Sprintf("echo \"%s\" > /etc/apt/sources.list.d/docker.list", aptRepo)
	cmd = &Command{Cmd: exec.Command("sh", "-c", updtRepo)}
	cmd.ExecuteCommand()
	updtRepoErr := cmd.GetStdErr()
	if updtRepoErr != "" {
		return fmt.Errorf("not able add update repo due to error : %s", updtRepoErr)
	}

	//Do an apt-get update
	// The second update picks up the newly added Docker repository.
	stdout, err = runCommandWithShell("apt-get update")
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	return nil
}
+
//IsDockerInstalled checks if docker is installed in the host or not.
//It returns one of:
//  AlreadySameVersionExist - requested version is already on the host
//  NewInstallRequired      - docker absent, or the requested version is in the repo
//  DefVerInstallRequired   - requested version absent but the default is available
//  VersionNAInRepo         - neither version found (or a step failed)
func (u *UbuntuOS) IsDockerInstalled(defVersion string) (types.InstallState, error) {
	// Extract the version number from `docker -v`; empty when not installed.
	cmd := &Command{Cmd: exec.Command("sh", "-c", "docker -v | cut -d ' ' -f3 | cut -d ',' -f1")}
	cmd.ExecuteCommand()
	str := cmd.GetStdOutput()

	if strings.Contains(str, u.DockerVersion) {
		return types.AlreadySameVersionExist, nil
	}

	// Ensure Docker's apt repo is configured before querying versions.
	if err := u.addDockerRepositoryAndUpdate(); err != nil {
		return types.VersionNAInRepo, err
	}

	if str == "" {
		return types.NewInstallRequired, nil
	}

	isReqVerAvail, err := u.IsToolVerInRepo("docker-ce", u.DockerVersion)
	if err != nil {
		return types.VersionNAInRepo, err
	}

	// Only look up the default version when it differs from the request.
	var isDefVerAvail bool
	if u.DockerVersion != defVersion {
		isDefVerAvail, err = u.IsToolVerInRepo("docker-ce", defVersion)
		if err != nil {
			return types.VersionNAInRepo, err
		}
	}

	if isReqVerAvail {
		return types.NewInstallRequired, nil
	}

	if isDefVerAvail {
		return types.DefVerInstallRequired, nil
	}

	return types.VersionNAInRepo, nil
}
+
//InstallDocker will install the specified docker in the host.
//Prerequisite packages are installed first, then the exact repo version
//string for u.DockerVersion is resolved and installed with apt-get
//(downgrades and held packages are allowed).
func (u *UbuntuOS) InstallDocker() error {
	fmt.Println("Installing ", u.DockerVersion, "version of docker")

	//Do an apt-get install
	instPreReq := fmt.Sprintf("apt-get install -y %s", DockerPreqReqList)
	stdout, err := runCommandWithShell(instPreReq)
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	//Get the exact version string from OS repo, so that it can search and install.
	chkDockerVer := fmt.Sprintf("apt-cache madison 'docker-ce' | grep %s | head -1 | awk '{$1=$1};1' | cut -d' ' -f 3", u.DockerVersion)
	cmd := &Command{Cmd: exec.Command("sh", "-c", chkDockerVer)}
	cmd.ExecuteCommand()
	stdout = cmd.GetStdOutput()
	errout := cmd.GetStdErr()
	if errout != "" {
		return fmt.Errorf("%s", errout)
	}

	fmt.Println("Expected docker version to install is", stdout)

	//Install docker-ce
	// stdout now holds the full repo version string (e.g. "5:18.09.0~3-0~ubuntu-bionic").
	dockerInst := fmt.Sprintf("apt-get install -y --allow-change-held-packages --allow-downgrades docker-ce=%s", stdout)
	stdout, err = runCommandWithShell(dockerInst)
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	fmt.Println("Docker", u.DockerVersion, "version is installed in this Host")

	return nil
}
+
+//IsK8SComponentInstalled checks if said K8S version is already installed in the host
+func (u *UbuntuOS) IsK8SComponentInstalled(component, defVersion string) (types.InstallState, error) {
+
+ find := fmt.Sprintf("dpkg -l | grep %s | awk '{print $3}'", component)
+ cmd := &Command{Cmd: exec.Command("sh", "-c", find)}
+ cmd.ExecuteCommand()
+ str := cmd.GetStdOutput()
+
+ if strings.Contains(str, u.KubernetesVersion) {
+ return types.AlreadySameVersionExist, nil
+ }
+
+ if err := u.addK8SRepositoryAndUpdate(); err != nil {
+ return types.VersionNAInRepo, err
+ }
+
+ if str == "" {
+ return types.NewInstallRequired, nil
+ }
+
+ isReqVerAvail, err := u.IsToolVerInRepo(component, u.KubernetesVersion)
+ if err != nil {
+ return types.VersionNAInRepo, err
+ }
+
+ var isDefVerAvail bool
+ if u.KubernetesVersion != defVersion {
+ isDefVerAvail, _ = u.IsToolVerInRepo(component, defVersion)
+ if err != nil {
+ return types.VersionNAInRepo, err
+ }
+ }
+
+ if isReqVerAvail {
+ return types.NewInstallRequired, nil
+ }
+
+ if isDefVerAvail {
+ return types.DefVerInstallRequired, nil
+ }
+
+ return types.VersionNAInRepo, nil
+}
+
// addK8SRepositoryAndUpdate registers the Kubernetes apt repository (keyed by
// the host's Ubuntu release) and refreshes the package index.
func (u *UbuntuOS) addK8SRepositoryAndUpdate() error {
	//Get the distribution version
	cmd := &Command{Cmd: exec.Command("sh", "-c", "lsb_release -cs")}
	cmd.ExecuteCommand()
	distVersion := cmd.GetStdOutput()
	if distVersion == "" {
		return fmt.Errorf("ubuntu dist version not available")
	}
	fmt.Println("Ubuntu distribution version is", distVersion)
	distVersionForSuite := distVersion
	if distVersion == UbuntuBionic {
		// No bionic-specific version is available on apt.kubernetes.io.
		// Use xenial version instead.
		distVersionForSuite = UbuntuXenial
	}
	suite := fmt.Sprintf("kubernetes-%s", distVersionForSuite)
	fmt.Println("Deb suite to use:", suite)

	//Do an apt-get update
	stdout, err := runCommandWithShell("apt-get update")
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	//curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
	//Get the GPG key
	curl := fmt.Sprintf("curl -s %s | apt-key add -", KubernetesGPGURL)
	cmd = &Command{Cmd: exec.Command("sh", "-c", curl)}
	cmd.ExecuteCommand()
	curlOutput := cmd.GetStdOutput()
	curlErr := cmd.GetStdErr()
	// apt-key prints "OK" on success; empty output or any stderr is a failure.
	if curlOutput == "" || curlErr != "" {
		return fmt.Errorf("not able add the apt key due to error : %s", curlErr)
	}
	fmt.Println(curlOutput)

	//Add K8S repo to local apt-get source.list
	aptRepo := fmt.Sprintf("deb %s %s main", KubernetesDownloadURL, suite)
	updtRepo := fmt.Sprintf("echo \"%s\" > /etc/apt/sources.list.d/kubernetes.list", aptRepo)
	cmd = &Command{Cmd: exec.Command("sh", "-c", updtRepo)}
	cmd.ExecuteCommand()
	updtRepoErr := cmd.GetStdErr()
	if updtRepoErr != "" {
		return fmt.Errorf("not able add update repo due to error : %s", updtRepoErr)
	}

	//Do an apt-get update
	// The second update picks up the newly added Kubernetes repository.
	stdout, err = runCommandWithShell("apt-get update")
	if err != nil {
		return err
	}
	fmt.Println(stdout)
	return nil
}
+
//InstallK8S will install kubeadm, kubectl and kubelet for the cloud node.
//The exact repo version string for u.KubernetesVersion is resolved via
//apt-cache and then all three components are installed at that version.
func (u *UbuntuOS) InstallK8S() error {
	k8sComponent := "kubeadm"
	fmt.Println("Installing", k8sComponent, u.KubernetesVersion, "version")

	//Get the exact version string from OS repo, so that it can search and install.
	chkKubeadmVer := fmt.Sprintf("apt-cache madison '%s' | grep %s | head -1 | awk '{$1=$1};1' | cut -d' ' -f 3", k8sComponent, u.KubernetesVersion)
	cmd := &Command{Cmd: exec.Command("sh", "-c", chkKubeadmVer)}
	cmd.ExecuteCommand()
	stdout := cmd.GetStdOutput()
	errout := cmd.GetStdErr()
	if errout != "" {
		return fmt.Errorf("%s", errout)
	}

	fmt.Println("Expected K8S('", k8sComponent, "') version to install is", stdout)

	//Install respective K8S components based on where it is being installed
	// All three packages are pinned to the same resolved version string.
	k8sInst := fmt.Sprintf("apt-get install -y --allow-change-held-packages --allow-downgrades kubeadm=%s kubelet=%s kubectl=%s", stdout, stdout, stdout)
	stdout, err := runCommandWithShell(k8sInst)
	if err != nil {
		return err
	}
	fmt.Println(stdout)

	fmt.Println(k8sComponent, "version", u.KubernetesVersion, "is installed in this Host")

	return nil
}
+
+//StartK8Scluster will do "kubeadm init" and cluster will be started
+func (u *UbuntuOS) StartK8Scluster() error {
+ var install bool
+ cmd := &Command{Cmd: exec.Command("sh", "-c", "kubeadm version")}
+ cmd.ExecuteCommand()
+ str := cmd.GetStdOutput()
+ if str != "" {
+ install = true
+ } else {
+ install = false
+ }
+ if install == true {
+ k8sInit := fmt.Sprintf("swapoff -a && kubeadm init --image-repository \"%s\" --pod-network-cidr=%s", u.K8SImageRepository, u.K8SPodNetworkCidr)
+ stdout, err := runCommandWithShell(k8sInit)
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+
+ stdout, err = runCommandWithShell("mkdir -p $HOME/.kube && cp -r /etc/kubernetes/admin.conf $HOME/.kube/config && sudo chown $(id -u):$(id -g) $HOME/.kube/config")
+ if err != nil {
+ return err
+ }
+ fmt.Println(stdout)
+ } else {
+ return fmt.Errorf("kubeadm not installed in this host")
+ }
+ fmt.Println("Kubeadm init successfully executed")
+ return nil
+}
+// // runCommandWithShell executes the given command with "sh -c".
+// // It returns an error if the command outputs anything on the stderr.
+// func runCommandWithShell(command string) (string, error) {
+// cmd := &Command{Cmd: exec.Command("sh", "-c", command)}
+// err := cmd.ExecuteCmdShowOutput()
+// if err != nil {
+// return "", err
+// }
+// errout := cmd.GetStdErr()
+// if errout != "" {
+// return "", fmt.Errorf("%s", errout)
+// }
+// return cmd.GetStdOutput(), nil
+// }
--- /dev/null
+/*
+Copyright © 2019 NAME HERE <EMAIL ADDRESS>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import "elcli/cmd"
+
// main is the elcli entry point; all work is delegated to the command tree in elcli/cmd.
func main() {
	cmd.Execute()
}
--- /dev/null
+Subproject commit 1485a34d5d5723fea214f5710708e19a831720e4
--- /dev/null
+Subproject commit cf7d376da96d9cecec7c7483cec2735efe54a410
--- /dev/null
+Subproject commit de8848e004dd33dc07a2947b3d76f618a7fc7ef1
--- /dev/null
+Subproject commit af06845cf3004701891bf4fdb884bfe4920b3727
--- /dev/null
+Subproject commit 3536a929edddb9a5b34bd6861dc4a9647cb459fe
--- /dev/null
+Subproject commit dba45d427ff48cfb9bcf633db3a8e43b7364e261
--- /dev/null
+Subproject commit 588a75ec4f32903aa5e39a2619ba6a4631e28424
--- /dev/null
+Subproject commit c01685bb8421cecb276fa517e91f757215f980b3
--- /dev/null
+Subproject commit 1c9c46d5c1cc2aaebdd1898c0680e85e8a44b36d
--- /dev/null
+Subproject commit 94f6ae3ed3bceceafa716478c5fbf8d29ca601a1
--- /dev/null
+Subproject commit 24fa6976df40757dce6aea913e7b81ade90530e1
--- /dev/null
+Subproject commit e02bc9eca55d5fc66221bc0aeeaaa77410603914
--- /dev/null
+Subproject commit 69b5b6104433beb2cb9c3ce00bdadf3c7c2d3f34
--- /dev/null
+/*
+
+*/
+
+package app
+
+import (
+ "flag"
+ "os"
+ "fmt"
+
+ "github.com/spf13/pflag"
+ "github.com/trial/app/cmd"
+)
+
//Run executes commands.
//BUG FIX: this previously did `return "Hello World"` from a function declared
//to return error, which does not compile; print the message and return nil.
func Run() error {
	fmt.Println("Hello World")
	return nil
}
--- /dev/null
+Subproject commit 959b441ac422379a43da2230f62be024250818b0
--- /dev/null
+Subproject commit fae7ac547cb717d141c433a2a173315e216b64c4
--- /dev/null
+Subproject commit 342b2e1fbaa52c93f31447ad2c6abc048c63e475
--- /dev/null
+Subproject commit f8d1dee965f76837e891ded19dd59ee264db8ddc
--- /dev/null
+Subproject commit 51d6538a90f86fe93ac480b35f37b2be17fef232
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
echo "***********************************************************************"
echo "cadvisor installation-------------------------------------------STARTED"

# Use $(...) instead of legacy backticks and quote every expansion so the
# container name stays a single word even for unusual hostnames.
HOSTNAME="$(hostname)"
sudo docker run \
  --volume=/:/rootfs:ro \
  --volume=/var/run:/var/run:ro \
  --volume=/sys:/sys:ro \
  --volume=/var/lib/docker/:/var/lib/docker:ro \
  --volume=/dev/disk/:/dev/disk:ro \
  --publish=8081:8080 \
  --detach=true \
  --name="cadvisor-${HOSTNAME}" \
  google/cadvisor:latest

echo "cadvisor setup--------------------------------------------------SUCCESS"
echo "***********************************************************************"
--- /dev/null
#!/bin/bash -ex
# FIX: the shebang was "# !/bin/bash -ex" (a plain comment because of the
# space), so the script was not guaranteed to run under bash with -ex.
##############################################################################
# Copyright (c) 2019 Huawei Tech and others.                                 #
#                                                                            #
# All rights reserved. This program and the accompanying materials           #
# are made available under the terms of the Apache License, Version 2.0      #
# which accompanies this distribution, and is available at                   #
# http://www.apache.org/licenses/LICENSE-2.0                                 #
##############################################################################

sudo kubeadm reset

# FIX: the delete used "test-k8snginx.yaml" while the existence check and rm
# used "testk8s-nginx.yaml", so the deployment was never actually deleted.
if [ -f "$HOME/testk8s-nginx.yaml" ]; then
  cd "$HOME" && kubectl delete -f testk8s-nginx.yaml && rm -rf testk8s-nginx.yaml
  echo "testk8s-nginx.yaml cleaned"
fi

if [ -d "/var/lib/etcd" ]; then
  sudo rm -rf /var/lib/etcd
  echo "etcd cleaned"
fi

KUBEADM_RESET="sudo kubeadm reset"
ETCD_CLEAN="sudo rm -rf /var/lib/etcd"
CLEANUP_PROM_CADVISOR="cd eliot/scripts/ci_management && ./uninstall_cadvisor.sh"

# Read all the Worker Node details from the nodelist file
# (one "user|ip|password" entry per line) and clean each node over ssh.
echo "$(pwd)"
while read -r line; do
  nodeinfo="${line}"
  nodeusr=$(echo "${nodeinfo}" | cut -d"|" -f1)
  nodeip=$(echo "${nodeinfo}" | cut -d"|" -f2)
  nodepaswd=$(echo "${nodeinfo}" | cut -d"|" -f3)
  sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${KUBEADM_RESET}"
  sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${ETCD_CLEAN}"
  sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${CLEANUP_PROM_CADVISOR}"
done < nodelist > /dev/null 2>&1
--- /dev/null
+#!/usr/bin/expect -f
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# Drive ./cleanup_centos.sh non-interactively: answer "y" to both of its
+# confirmation prompts, then hand control of the spawned process back to
+# the terminal.
+spawn ./cleanup_centos.sh
+expect "Are you sure you want to proceed? "
+# NOTE(review): Expect convention is to send "\r" (carriage return) to
+# simulate pressing Enter; "\n" presumably works against this script's
+# read prompts — confirm against cleanup_centos.sh.
+send "y\n"
+
+# The script asks for confirmation a second time; answer "y" again.
+expect "Are you sure you want to proceed? "
+send "y\n"
+# Relay any remaining output/interaction to the user.
+interact
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+TESTYAML="testk8s-nginx.yaml"
+
+# start
+
+# Load the kubeedge deployment configuration; provides EDGENODEUSR,
+# EDGENODEIP, EDGENODEPASSWORD, HOME_EDGENODE and PATH_OF_ELIOTFOLDER.
+source ../src/config_kubeedge > /dev/null 2>&1
+cd
+kubectl delete -f "$TESTYAML"
+
+# Copy the edge cleanup scripts to the edge node and run them there,
+# executing the master-node cleanup in between.
+exec_edge_master(){
+
+ sshpass -p "${EDGENODEPASSWORD}" \
+ scp "${PATH_OF_ELIOTFOLDER}/scripts/ci_management/cleanup_edge.sh" \
+ "${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE"
+
+ sshpass -p "${EDGENODEPASSWORD}" ssh "${EDGENODEUSR}@${EDGENODEIP}" \
+ source cleanup_edge.sh
+
+ cd "$PATH_OF_ELIOTFOLDER/scripts/ci_management"
+ source cleanup_master.sh
+
+ sshpass -p "${EDGENODEPASSWORD}" \
+ scp "${PATH_OF_ELIOTFOLDER}/scripts/ci_management/cleanup_edge_final.sh" \
+ "${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE"
+
+ sshpass -p "${EDGENODEPASSWORD}" ssh "${EDGENODEUSR}@${EDGENODEIP}" \
+ source cleanup_edge_final.sh
+
+}
+
+exec_edge_master > /dev/null 2>&1
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Clean up kubeedge state on the edge node: keadm reset, certificates,
+# checked-out source tree and the edge_core binary.
+# NOTE: the unused NGINX/KUBEPROXY/CONSTZERO constants (two dead
+# "sudo docker ps" invocations) were removed; container cleanup lives in
+# cleanup_edge_final.sh.
+
+# start
+
+# Load kubeedge configuration (provides MASTERNODEIP; GOPATH comes from
+# the profile).
+source config_kubeedge > /dev/null 2>&1
+source ~/.profile
+
+cd
+
+# Undo ./keadm join if the kubeedge source tree is still present.
+if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
+ cd "$GOPATH/src/github.com/kubeedge/kubeedge/keadm"
+ ./keadm reset --k8sserverip "$MASTERNODEIP:8080"
+fi
+
+cd /etc/kubeedge
+
+# Remove the packaged and unpacked certificate material.
+if [ -f "certs.tgz" ]; then
+ sudo rm -rf certs.tgz
+fi
+
+if [ -d "/etc/kubeedge/ca" ]; then
+ sudo rm -rf /etc/kubeedge/ca
+fi
+
+if [ -d "/etc/kubeedge/certs" ]; then
+ sudo rm -rf /etc/kubeedge/certs
+fi
+
+# Remove the checked-out Go sources under root's GOPATH.
+if [ -d "/root/go/src" ]; then
+ sudo rm -rf /root/go/src
+fi
+
+# stop binaries edge_core
+cd /usr/local/bin
+
+if [ -f "edge_core" ]; then
+ sudo rm edge_core
+fi
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+# Count of running containers for the nginx test image and for kube-proxy.
+NGINX=$(sudo docker ps | grep nginx | wc -l)
+KUBEPROXY=$(sudo docker ps | grep k8s.gcr.io | wc -l)
+CONSTZERO="0"
+
+# start
+echo "nginx container stop"
+if [ "$NGINX" != "$CONSTZERO" ]; then
+ # BUGFIX: the inner "docker ps" previously ran without sudo and could
+ # return nothing (making "docker kill" fail with no container id); run
+ # it with sudo, consistent with the counting commands above.
+ sudo docker kill $(sudo docker ps -q --filter ancestor=nginx:1.15.12)
+fi
+
+echo "kubeproxy container stop"
+if [ "$KUBEPROXY" != "$CONSTZERO" ]; then
+ sudo docker kill $(sudo docker ps -q --filter ancestor=k8s.gcr.io/kube-proxy:v1.14.3)
+fi
+echo "Finished"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+TESTYAML="testk8s-nginx.yaml"
+
+# start
+
+# keadm reset internally undoes the things done by ./keadm init
+if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
+ cd "$GOPATH/src/github.com/kubeedge/kubeedge/keadm"
+ ./keadm reset
+fi
+
+# delete the previously existing certificates
+
+if [ -d "/etc/kubeedge/ca" ]; then
+ sudo rm -rf /etc/kubeedge/ca
+fi
+
+if [ -d "/etc/kubeedge/certs" ]; then
+ cd /etc/kubeedge
+ sudo rm -rf certs
+fi
+
+cd /etc/kubeedge
+if [ -f "certs.tgz" ]; then
+ sudo rm certs.tgz
+fi
+
+# delete the kubeedge code
+
+if [ -d "$GOPATH/src" ]; then
+ cd "$GOPATH"
+ sudo rm -rf src
+fi
+
+# stop binaries edge_core edgecontroller
+
+cd /usr/local/bin
+
+if [ -f "edge_core" ]; then
+ sudo rm edge_core
+fi
+
+if [ -f "edgecontroller" ]; then
+ sudo rm edgecontroller
+fi
+
+# BUGFIX: a "sudo su" previously ran here for non-root users; in a
+# non-interactive CI run it spawns an interactive shell and hangs the
+# script. The remaining commands already use sudo per-command, so no
+# privilege escalation is needed.
+
+cd
+
+# Remove the nginx test manifest from the home directory, if present.
+if [ -f "$TESTYAML" ]; then
+ sudo rm "$TESTYAML"
+fi
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# The script is to stop and remove the prometheus and cadvisor containers from
+# ELIOT Manager and ELIOT Edge Node respectively.
+
+# stop prometheus in ELIOT Manager
+source uninstall_prometheus.sh | tee uninstall_prometheus.log
+
+# stop cadvisor statement executed at ELIOT Edge Node
+stop_cadvisor_atedge="cd eliot/scripts/ci_management && source uninstall_cadvisor.sh"
+
+# Read all the Worker Node details from nodelist file.
+# Each line is formatted as: user|ip|password
+while read -r line
+do
+ nodeinfo="${line}"
+ nodeusr=$(echo "${nodeinfo}" | cut -d"|" -f1)
+ nodeip=$(echo "${nodeinfo}" | cut -d"|" -f2)
+ nodepaswd=$(echo "${nodeinfo}" | cut -d"|" -f3)
+ sshpass -p "${nodepaswd}" ssh "${nodeusr}@${nodeip}" "${stop_cadvisor_atedge}"
+done < ../nodelist > /dev/null 2>&1
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+# Stop and remove the cadvisor container on the ELIOT Edge Node.
+# Idiom fix: "grep -c" replaces "grep | wc -l"; test operands quoted.
+if [ "$(sudo docker ps | grep -c cadvisor)" -gt 0 ]; then
+ sudo docker stop $(sudo docker ps | grep cadvisor | awk '{ print $1 }')
+fi
+
+# Remove the (now stopped) container so a later install starts clean.
+if [ "$(sudo docker ps -a | grep -c cadvisor)" -gt 0 ]; then
+ sudo docker rm $(sudo docker ps -a | grep cadvisor | awk '{ print $1 }')
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
+# stop prometheus in ELIOT Manager
+# Idiom fix: "grep -c" replaces "grep | wc -l"; test operands quoted.
+
+if [ "$(sudo docker ps | grep -c prometheus)" -gt 0 ]; then
+ echo "Stopping prometheus container id :- $(sudo docker ps | grep prometheus | awk '{ print $1 }')"
+ sudo docker stop $(sudo docker ps | grep prometheus | awk '{ print $1 }')
+fi
+# Remove the (now stopped) container so a later install starts clean.
+if [ "$(sudo docker ps -a | grep -c prometheus)" -gt 0 ]; then
+ echo "Removing prometheus container id $(sudo docker ps -a | grep prometheus | awk '{ print $1 }')"
+ sudo docker rm $(sudo docker ps -a | grep prometheus | awk '{ print $1 }')
+fi
+
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+# This manifest includes the following component versions:
+# calico/node:v3.3.4
+# calico/cni:v3.3.4
+#
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # To enable Typha, set this to "calico-typha" *and*
+ # set a non-zero value for Typha replicas
+ # below. We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential.
+ typha_service_name: "none"
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "usePodCidr"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+
+
+# This manifest creates a Service,
+# which will be backed by Calico's Typha daemon.
+# Typha sits in between Felix and the API server,
+# reducing Calico's load on the API server.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ ports:
+ - port: 5473
+ protocol: TCP
+ targetPort: calico-typha
+ name: calico-typha
+ selector:
+ k8s-app: calico-typha
+
+---
+
+# This manifest creates a Deployment of Typha to back the above service.
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ # Number of Typha replicas.
+ # To enable Typha, set this to a non-zero value *and* set the
+ # typha_service_name variable in the calico-config ConfigMap above.
+ #
+ # We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential
+ # (when using the Kubernetes datastore).
+ # Use one replica for every 100-200 nodes. In production,
+ # we recommend running at least 3 replicas to reduce the
+ # impact of rolling upgrade.
+ replicas: 0
+ revisionHistoryLimit: 2
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-typha
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical
+ # add-on, ensuring it gets priority scheduling
+ # and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Since Calico can't network a pod until Typha is up,
+ # we need to run Typha itself as a host-networked pod.
+ serviceAccountName: calico-node
+ containers:
+ - image: calico/typha:v3.3.4
+ name: calico-typha
+ ports:
+ - containerPort: 5473
+ name: calico-typha
+ protocol: TCP
+ env:
+ # Enable "info" logging by default.
+ # Can be set to "debug" to increase verbosity.
+ - name: TYPHA_LOGSEVERITYSCREEN
+ value: "info"
+ # Disable logging to file and syslog
+ # since those don't make sense in K8s.
+ - name: TYPHA_LOGFILEPATH
+ value: "none"
+ - name: TYPHA_LOGSEVERITYSYS
+ value: "none"
+ # Monitor the Kubernetes API to find the number of running instances
+ # and rebalance connections.
+ - name: TYPHA_CONNECTIONREBALANCINGMODE
+ value: "kubernetes"
+ - name: TYPHA_DATASTORETYPE
+ value: "kubernetes"
+ - name: TYPHA_HEALTHENABLED
+ value: "true"
+ # Uncomment these lines to enable prometheus metrics.
+ # Since Typha is host-networked,
+ # this opens a port on the host, which may need to be secured.
+ # - name: TYPHA_PROMETHEUSMETRICSENABLED
+ # value: "true"
+ # - name: TYPHA_PROMETHEUSMETRICSPORT
+ # value: "9093"
+ livenessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - liveness
+ periodSeconds: 30
+ initialDelaySeconds: 30
+ readinessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - readiness
+ periodSeconds: 10
+---
+
+# This manifest creates a Pod Disruption Budget
+# for Typha to allow K8s Cluster Autoscaler to evict
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-typha
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion;
+ # tell Kubernetes to do a "force deletion"
+ # https://kubernetes.io/docs/concepts
+ # /workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.3.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Typha support: controlled by the ConfigMap.
+ - name: FELIX_TYPHAK8SSERVICENAME
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: typha_service_name
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists.
+ # Pod IPs will be chosen from this range.
+ # Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.3.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+
+# Create all the CustomResourceDefinitions needed for
+# Calico policy and networking mode.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups: [""]
+ resources:
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - services
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups: ["extensions"]
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - create
+ - get
+ - list
+ - update
+ - watch
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+ - kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
--- /dev/null
+# yamllint disable
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Typha is disabled.
+ typha_service_name: "none"
+ # Configure the backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamblocks.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMBlock
+ plural: ipamblocks
+ singular: ipamblock
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: blockaffinities.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BlockAffinity
+ plural: blockaffinities
+ singular: blockaffinity
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamhandles.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMHandle
+ plural: ipamhandles
+ singular: ipamhandle
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamconfigs.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMConfig
+ plural: ipamconfigs
+ singular: ipamconfig
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networksets.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkSet
+ plural: networksets
+ singular: networkset
+---
+# Source: calico/templates/rbac.yaml
+
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Nodes are watched to monitor for deletions.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - watch
+ - list
+ - get
+ # Pods are queried to check for existence.
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ # IPAM resources are manipulated when nodes are deleted.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # Needs access to update clusterinformations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - clusterinformations
+ verbs:
+ - get
+ - create
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ # The CNI plugin needs to get pods, nodes, and namespaces.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - services
+ verbs:
+ # Used to discover service IPs for advertisement.
+ - watch
+ - list
+ # Used to discover Typhas.
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ # Needed for clearing NodeNetworkUnavailable flag.
+ - patch
+ # Calico stores some configuration information in node annotations.
+ - update
+ # Watch for changes to Kubernetes NetworkPolicies.
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ # Used by Calico for policy information.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - list
+ - watch
+ # The CNI plugin patches pods/status.
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ # Calico monitors various CRDs for config.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - ipamblocks
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - networksets
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - get
+ - list
+ - watch
+ # Calico must create and update some CRDs on startup.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ - felixconfigurations
+ - clusterinformations
+ verbs:
+ - create
+ - update
+ # Calico stores some configuration information on the node.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+ # These permissions are only required for upgrade from v2.6, and can
+ # be removed after upgrade or on fresh installations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - bgpconfigurations
+ - bgppeers
+ verbs:
+ - create
+ - update
+ # These permissions are required for Calico CNI to perform IPAM allocations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipamconfigs
+ verbs:
+ - get
+ # Block affinities must also be watchable by confd for route aggregation.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ verbs:
+ - watch
+ # The Calico IPAM migration needs to get daemonsets. These permissions can be
+ # removed if not upgrading from an installation using host-local IPAM.
+ - apiGroups: ["apps"]
+ resources:
+ - daemonsets
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ priorityClassName: system-node-critical
+ initContainers:
+ # This container performs upgrade from host-local IPAM to calico-ipam.
+ # It can be deleted if this is a fresh installation, or if you have already
+ # upgraded to use calico-ipam.
+ - name: upgrade-ipam
+ image: calico/cni:v3.8.4
+ command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
+ env:
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ volumeMounts:
+ - mountPath: /var/lib/cni/networks
+ name: host-local-net-dir
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ securityContext:
+ privileged: true
+ # This container installs the CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.8.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Prevents the container from sleeping forever.
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ securityContext:
+ privileged: true
+ # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
+ # to communicate with Felix over the Policy Sync API.
+ - name: flexvol-driver
+ image: calico/pod2daemon-flexvol:v3.8.4
+ volumeMounts:
+ - name: flexvol-driver-host
+ mountPath: /host/driver
+ securityContext:
+ privileged: true
+ containers:
+ # Runs calico-node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.8.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - name: policysync
+ mountPath: /var/run/nodeagent
+ volumes:
+ # Used by calico-node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the directory for host-local IPAM allocations. This is
+ # used when upgrading from host-local to calico-ipam, and can be removed
+ # if not using the upgrade-ipam init container.
+ - name: host-local-net-dir
+ hostPath:
+ path: /var/lib/cni/networks
+ # Used to create per-pod Unix Domain Sockets
+ - name: policysync
+ hostPath:
+ type: DirectoryOrCreate
+ path: /var/run/nodeagent
+ # Used to install Flex Volume Driver
+ - name: flexvol-driver-host
+ hostPath:
+ type: DirectoryOrCreate
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.8.4
+ env:
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: node
+ - name: DATASTORE_TYPE
+ value: kubernetes
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
+
+---
+# Source: calico/templates/calico-typha.yaml
+
+---
+# Source: calico/templates/configure-canal.yaml
+
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This script is to install common software for ELIOT.
+# To be executed in Eliot Manager and Eliot Nodes.
+# Script will install Docker software.
+# Script has to be executed in Ubuntu 16.04.
+
+# Set Docker version
+# NOTE(review): the shebang options "-ex" are lost if this file is run as
+# "bash <script>"; consider an explicit "set -ex" in the body.
+DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
+
+# git is required later to fetch ELIOT sources on every node.
+sudo apt-get update && sudo apt-get install -y git
+
+# Install Docker as Prerequisite
+# Add Docker's official GPG key, verify its fingerprint, then register
+# the stable Docker apt repository for this Ubuntu release.
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo apt-key fingerprint 0EBFCD88
+sudo add-apt-repository \
+ "deb https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+
+sudo apt update
+# Install the exact docker-ce version pinned above.
+sudo apt install -y docker-ce=${DOCKER_VERSION}
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# constants
+
+DOCKER_VERSION=18.09.6
+KUBE_VERSION=1.16.0-0
+MACHINE=$(uname -m)
+
+# start
+
+# This script will install docker, kubeadm on both Eliot Master and Edge nodes
+
+# Disable SELinux so containers can access the host filesystem.
+sudo sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' \
+/etc/sysconfig/selinux
+
+# Let iptables see bridged traffic (needed by kube-proxy and the CNI).
+sudo modprobe br_netfilter
+_conf='/etc/sysctl.d/99-akraino-eliot.conf'
+echo 'net.bridge.bridge-nf-call-iptables = 1' |& sudo tee "${_conf}"
+sudo sysctl -q -p "${_conf}"
+
+#echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
+
+# kubelet requires swap to be off.
+swapoff -a
+
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+
+sudo yum-config-manager \
+--add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+# FIX: "-y" keeps the install non-interactive; without it yum prompts
+# for confirmation and an unattended run hangs here.
+sudo yum install -y docker-ce-${DOCKER_VERSION} docker-ce-cli-${DOCKER_VERSION} \
+containerd.io
+
+# Kubernetes repository set
+
+cat <<-EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-${MACHINE}
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOF
+
+# Set SELinux in permissive mode (effectively disabling it)
+setenforce 0
+sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+
+# Install the Kubernetes tooling; excludes are disabled because the repo
+# above ships with exclude=kube* by upstream convention.
+yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
+systemctl enable --now kubelet
+
+# Pin kubeadm to the version expected by the rest of the blueprint.
+sudo yum install -y kubeadm-${KUBE_VERSION}
+sudo systemctl start docker && sudo systemctl enable docker
+
+sudo systemctl daemon-reload
--- /dev/null
+#!/bin/bash -ex
+#############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+#############################################################################
+
+#######################################################################################
+# The script is to setup the Edgex Foundry application as POD in Kubernetes. #
+#######################################################################################
+
+echo "**********************************************************************"
+echo "Edgex Platform Deployment--------------------------------------STARTED"
+
+echo "Deploying Edgex Platform on IOT-Gateway Edge Node"
+# Remember the starting directory so we can return after the deploy.
+edgexPath=`pwd`
+# Fetch the upstream EdgeX Kubernetes manifests (edinburgh release).
+git clone https://github.com/edgexfoundry-holding/edgex-kubernetes-support.git
+cd edgex-kubernetes-support/releases/edinburgh/kubernetes
+ls
+# Deploy the whole kustomize tree in the current directory.
+kubectl create -k .
+cd ${edgexPath}
+echo "-----------------------------------------------------------"
+echo "Edgex platform PODs"
+kubectl get pod
+echo "-----------------------------------------------------------"
+echo "-----------------------------------------------------------"
+echo "Edge platform Kubernetes Services"
+kubectl get svc
+echo "-----------------------------------------------------------"
+# Expose the core EdgeX services outside the cluster via NodePorts.
+kubectl expose deployment edgex-core-consul --type=NodePort --name=consulnodeport
+kubectl expose deployment edgex-core-command --type=NodePort --name=commandnodeport
+kubectl expose deployment edgex-core-data --type=NodePort --name=datanodeport
+kubectl expose deployment edgex-core-metadata --type=NodePort --name=metadatanodeport
+kubectl expose deployment edgex-support-rulesengine --type=NodePort --name=rulesenginenodeport
+kubectl expose deployment edgex-support-logging --type=NodePort --name=loggingnodeport
+kubectl get svc | grep NodePort
+
+echo "**********************************************************************"
+echo "Edgex Platform Deployment--------------------------------------SUCCESS"
+
+
+
+
+
+
+
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# To verify edgex platform deployment on k8s.
+
+# Poll (10 tries x 5s) until the edgex-config-seed pod shows Completed.
+retrytimes=10
+while [ $retrytimes -gt 0 ]
+do
+ if [ 1 == "$(kubectl get pods | grep edgex-config-seed | grep -i completed | wc -l)" ]; then
+ break
+ fi
+ # FIX: plain arithmetic expansion instead of ((retrytimes-=1)).
+ # The (( )) form exits with status 1 when its result is 0, which
+ # kills the script under the shebang's "-e" on the final retry
+ # before the explicit exit-code check below can run.
+ retrytimes=$((retrytimes-1))
+ sleep 5
+done
+# All retries exhausted without seeing Completed -> fail verification.
+[ $retrytimes -gt 0 ] || exit 1
+
+# Reset the variable to check Running status of other edgex platform microservices
+
+# Poll (20 tries x 5s) until all 12 edgex microservice pods are Running.
+retrytimes=20
+while [ $retrytimes -gt 0 ]
+do
+ if [ 12 == "$(kubectl get pods | grep edgex | grep Running | wc -l)" ]; then
+ echo "Edgex Platform Deployment integration on IOT Gateway---------------------SUCCESS"
+ break
+ fi
+ retrytimes=$((retrytimes-1))
+ sleep 5
+done
+[ $retrytimes -gt 0 ] || exit 1
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Generates ~/prometheus.yml with a cadvisor scrape target for every
+# worker node listed in the "nodelist" file.
+# Assumed nodelist format: one "user|ip|password" record per line; only
+# field 2 (the IP) is used here -- TODO confirm against the writer.
+promyml=~/prometheus.yml
+workernodeip=""
+blank=""
+count=1
+firstline=1
+# Build a list like ip1:8081','ip2:8081 so that, once wrapped in the
+# single quotes inside the heredoc below, it becomes a valid YAML list.
+while read line
+do
+ # From the second record on, prepend the "','" separator.
+ if [ $count -gt $firstline ]; then
+ workernodeip+="','"
+ fi
+ nodeinfo="${line}"
+ # Field 2 of the "|"-separated record is the node IP.
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ echo $nodeip
+ workernodeip+=$nodeip
+ # 8081 is the cadvisor port exposed on each worker node.
+ workernodeip+=":8081"
+ echo $workernodeip
+ count=2
+ echo $count
+done < nodelist > /dev/null 2>&1
+
+echo "workernodeip="
+echo $workernodeip
+
+# Write the Prometheus config; $workernodeip expands inside the heredoc.
+cat <<EOF > "${promyml}"
+---
+global:
+ scrape_interval: 15s
+
+scrape_configs:
+ - job_name: 'prometheus'
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['localhost:9090']
+
+ - job_name: cadvisor
+ scrape_interval: 5s
+ static_configs:
+ - targets: ['$workernodeip']
+EOF
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Installs kubeadm/kubelet/kubectl on Ubuntu, initializes a single-node
+# control plane, and applies the bundled Calico v3.8 CNI manifest.
+KUBE_VERSION=1.16.0-00
+POD_NETWORK_CIDR=192.168.0.0/16
+K8S_CNI_VERSION=0.7.5-00
+
+#K8s service CIDR range
+K8s_SVC_CIDR=10.96.0.0/12
+
+# Install Kubernetes with Kubeadm
+
+# Disable swap
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+# Add the Google apt key and the upstream Kubernetes repository.
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+# Pin kubelet/kubeadm/kubectl and the CNI plugins to known versions.
+sudo apt install -y \
+ kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} \
+ kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+
+# Prevent unattended upgrades from moving the pinned versions.
+sudo apt-mark hold kubelet kubeadm kubectl
+
+# Only initialize a cluster if kubectl cannot reach one already.
+if ! kubectl get nodes; then
+ # First address reported by "hostname -I" is advertised to kubeadm.
+ hostname -I > hostname.tmp
+ MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+ rm hostname.tmp
+ sudo kubeadm config images pull
+ sudo kubeadm init \
+ --apiserver-advertise-address="${MASTER_IP}" \
+ --pod-network-cidr="${POD_NETWORK_CIDR}" \
+ --service-cidr="${K8s_SVC_CIDR}"
+
+ # Root: export KUBECONFIG via .profile; sourcing it right after the
+ # append makes the export effective for the kubectl calls below.
+ if [ "$(id -u)" = 0 ]; then
+ KUBECONFIG=/etc/kubernetes/admin.conf
+ echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+ tee -a "${HOME}/.profile"
+ source "${HOME}/.profile"
+ else
+ # Non-root: use the conventional ~/.kube/config copy instead.
+ mkdir -p "${HOME}/.kube"
+ sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+ sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+ fi
+ #kubectl apply -f "cni/calico/rbac.yaml"
+ kubectl apply -f "cni/calico/v38/calico.yaml"
+
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# Installs the Kubernetes control plane with kubeadm on CentOS.
+
+# constants
+
+POD_NETWORK_CIDR=192.168.0.0/16
+KUBE_VERSION=1.16.0-0
+KUBERNETES_CNI=0.7.5-0
+
+# start
+
+# First address reported by "hostname -I" is the IP the API server
+# will advertise on.
+hostname -I > hostname.tmp
+MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+rm hostname.tmp
+
+# kubernetes installation
+
+sudo yum install -y kubelet-${KUBE_VERSION} kubectl-${KUBE_VERSION} \
+kubernetes-cni-${KUBERNETES_CNI}
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
+
+# Initialize kubernetes on master
+
+sudo kubeadm init \
+ --apiserver-advertise-address="${MASTER_IP}" \
+ --pod-network-cidr="${POD_NETWORK_CIDR}"
+
+# Make kubectl usable for the invoking user.
+mkdir -p "${HOME}/.kube"
+sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.16.0-00
+K8S_CNI_VERSION=0.7.5-00
+
+# Install Kubernetes with Kubeadm
+# The script will be executed in Eliot Edge Node
+
+# kubelet requires swap to be disabled.
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+# Add the Google apt key and the upstream Kubernetes repository.
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+# Pin kubeadm/kubelet/CNI to the versions declared above; kubectl is
+# not needed on a worker node.
+sudo apt install -y \
+ kubeadm=${KUBE_VERSION} kubelet=${KUBE_VERSION} kubernetes-cni=${K8S_CNI_VERSION}
+
+#sudo apt-mark hold kubelet kubeadm
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This script is to install common software for ELIOT.
+# To be executed in Eliot Manager and Eliot Nodes.
+# Script will install Docker software.
+# Script has to be executed in Ubuntu 16.04.
+
+# Set Docker version
+DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
+
+sudo apt-get update && sudo apt-get install -y git
+
+# Install Docker as Prerequisite
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo apt-key fingerprint 0EBFCD88
+sudo add-apt-repository \
+ "deb https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+
+sudo apt update
+sudo apt install -y docker-ce=${DOCKER_VERSION}
+
--- /dev/null
+EDGENODEUSR=""
+EDGENODEIP=""
+EDGENODEPASSWORD=""
+MASTERNODEIP=""
+HOME_EDGENODE="/root"
+KUBEEDGE_ETC="/etc/kubeedge"
+KUBEEDGE_VERSION_ETC="/etc/kubeedge-v1.1.0-linux-amd64/"
+PATH_OF_KUBEEDGE=":/root/eliot/blueprints/iotgateway/scripts/kubeedge"
--- /dev/null
+controller:
+ kube:
+ master: # kube-apiserver address (such as:http://localhost:8080)
+ namespace: ""
+ content_type: "application/vnd.kubernetes.protobuf"
+ qps: 5
+ burst: 10
+ node_update_frequency: 10
+ kubeconfig: "/root/.kube/config"
+cloudhub:
+ protocol_websocket: true # enable websocket protocol
+ port: 10000 # open port for websocket server
+ protocol_quic: true # enable quic protocol
+ quic_port: 10001 # open prot for quic server
+ max_incomingstreams: 10000 # the max incoming stream for quic server
+ enable_uds: true # enable unix domain socket protocol
+ uds_address: unix:///var/lib/kubeedge/kubeedge.sock
+ address: 0.0.0.0
+ ca: /etc/kubeedge/ca/rootCA.crt
+ cert: /etc/kubeedge/certs/edge.crt
+ key: /etc/kubeedge/certs/edge.key
+ keepalive-interval: 30
+ write-timeout: 30
+ node-limit: 10
+devicecontroller:
+ kube:
+ master: # kube-apiserver address (such as:http://localhost:8080)
+ namespace: ""
+ content_type: "application/vnd.kubernetes.protobuf"
+ qps: 5
+ burst: 10
+ kubeconfig: "/root/.kube/config"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Installs kubeadm/kubelet/kubectl on Ubuntu and initializes a
+# single-node control plane (CNI apply is left disabled below).
+KUBE_VERSION=1.16.0-00
+POD_NETWORK_CIDR=192.168.0.0/16
+K8S_CNI_VERSION=0.7.5-00
+
+#K8s service CIDR range
+K8s_SVC_CIDR=10.96.0.0/12
+
+# Install Kubernetes with Kubeadm
+
+# Disable swap
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+# Add the Google apt key and the upstream Kubernetes repository.
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+# Pin kubelet/kubeadm/kubectl and the CNI plugins to known versions.
+sudo apt install -y \
+ kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} \
+ kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+
+# Prevent unattended upgrades from moving the pinned versions.
+sudo apt-mark hold kubelet kubeadm kubectl
+
+# Only initialize a cluster if kubectl cannot reach one already.
+if ! kubectl get nodes; then
+ # First address reported by "hostname -I" is advertised to kubeadm.
+ hostname -I > hostname.tmp
+ MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+ rm hostname.tmp
+ sudo kubeadm config images pull
+ sudo kubeadm init \
+ --apiserver-advertise-address="${MASTER_IP}" \
+ --pod-network-cidr="${POD_NETWORK_CIDR}" \
+ --service-cidr="${K8s_SVC_CIDR}"
+
+ # Root: export KUBECONFIG via .profile; sourcing it right after the
+ # append makes the export effective for later kubectl calls.
+ if [ "$(id -u)" = 0 ]; then
+ KUBECONFIG=/etc/kubernetes/admin.conf
+ echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+ tee -a "${HOME}/.profile"
+ source "${HOME}/.profile"
+ else
+ # Non-root: use the conventional ~/.kube/config copy instead.
+ mkdir -p "${HOME}/.kube"
+ sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+ sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+ fi
+ #kubectl apply -f "cni/calico/v38/calico.yaml"
+
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+echo "**********************************************************************"
+echo "Kubeedge-v1.1.0 Installation------------------------------------------STARTED"
+
+# constants
+
+# KubeEdge release coordinates used to build the download URL and the
+# /etc/kubeedge-<version>-<os>-<arch> paths throughout this script.
+VERSION="v1.1.0"
+OS="linux"
+ARCH="amd64"
+
+# Location of the edgecore binary on the edge node after the unpacked
+# release directory has been copied there (see exec_edge).
+PATH_OF_EDGECORE="/etc/kubeedge-v1.1.0-linux-amd64/edge/edgecore"
+
+initialize_k8s_cluster()
+{
+ # Make the kubeadm admin kubeconfig available to root's kubectl.
+ # The commented lines are the full cluster bootstrap, kept for
+ # reference; here the cluster is assumed to exist already.
+ #cd ../
+ #./setup.sh
+ cp /etc/kubernetes/admin.conf /root/.kube/config
+ #source common.sh
+ #source k8smaster.sh
+}
+
+kubeedge_tar_untar()
+{
+ # Download the KubeEdge release tarball and unpack it under /etc.
+ # "--fail" makes curl exit non-zero on an HTTP error instead of saving
+ # the error page, which tar would then fail to unpack confusingly.
+ curl --fail -L "https://github.com/kubeedge/kubeedge/releases/download/${VERSION}/kubeedge-${VERSION}-${OS}-${ARCH}.tar.gz" \
+ --output kubeedge-${VERSION}-${OS}-${ARCH}.tar.gz && sudo tar -xf kubeedge-${VERSION}-${OS}-${ARCH}.tar.gz -C /etc
+
+}
+
+generate_certs()
+{
+ echo "generate_certs started"
+ # Fetch the upstream certgen helper and create the edge cert/key pair
+ # (written under /etc/kubeedge, consumed by cloudcore and edgecore).
+ # NOTE(review): "-L" is a curl redirect flag; for wget it means
+ # --relative and does not follow redirects -- confirm intent.
+ wget -L https://raw.githubusercontent.com/kubeedge/kubeedge/master/build/tools/certgen.sh
+
+ chmod +x certgen.sh
+ bash -x ./certgen.sh genCertAndKey edge
+
+ echo "generate_certs ended"
+
+}
+
+initialize_yaml()
+{
+ echo "initialize_yaml started"
+ # Fetch and register the KubeEdge device CRDs (device model + device).
+ # The former "chmod +x" calls on the downloaded manifests were
+ # dropped: they are YAML data read by kubectl, not executables.
+ wget -L https://raw.githubusercontent.com/kubeedge/kubeedge/master/build/crds/devices/devices_v1alpha1_devicemodel.yaml
+
+ kubectl create -f devices_v1alpha1_devicemodel.yaml
+
+ wget -L https://raw.githubusercontent.com/kubeedge/kubeedge/master/build/crds/devices/devices_v1alpha1_device.yaml
+
+ kubectl create -f devices_v1alpha1_device.yaml
+ echo "initialize_yaml ended"
+}
+
+# Run cloudcore
+
+cloudcore_start()
+{
+ echo "cloudcore_start started"
+ # Install our controller.yaml into the unpacked release, then launch
+ # cloudcore detached; its log goes to cloudcore.log in that directory.
+ cp controller.yaml /etc/kubeedge-${VERSION}-${OS}-${ARCH}/cloud/cloudcore/conf/controller.yaml
+ cd /etc/kubeedge-${VERSION}-${OS}-${ARCH}/cloud/cloudcore
+ nohup ./cloudcore > cloudcore.log 2>&1 &
+ echo "cloudcore_start ended"
+}
+
+edge_modify()
+{
+ # Point the edge config at the master: replace the 0.0.0.0 placeholder
+ # in edge.yaml with MASTERNODEIP (set by config_kubeedge). A temp file
+ # is used because sed cannot redirect onto its own input file.
+ sed "s/0.0.0.0/${MASTERNODEIP}/" /etc/kubeedge-${VERSION}-${OS}-${ARCH}/edge/conf/edge.yaml > /etc/kubeedge-${VERSION}-${OS}-${ARCH}/edge/conf/edge_new.yaml
+ #rm -rf /etc/kubeedge-${VERSION}-${OS}-${ARCH}/edge/conf/edge.yaml
+ mv /etc/kubeedge-${VERSION}-${OS}-${ARCH}/edge/conf/edge_new.yaml /etc/kubeedge-${VERSION}-${OS}-${ARCH}/edge/conf/edge.yaml
+
+}
+
+exec_edge()
+{
+ echo "exec_edge started"
+
+ # Copy the KubeEdge certs and the unpacked release to the edge node
+ # (credentials come from config_kubeedge).
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp -r $KUBEEDGE_ETC \
+ ${EDGENODEUSR}@${EDGENODEIP}:/etc
+
+ sshpass -p ${EDGENODEPASSWORD} \
+ scp -r $KUBEEDGE_VERSION_ETC \
+ ${EDGENODEUSR}@${EDGENODEIP}:/etc
+
+ # Start edgecore on the edge node over ssh.
+ # NOTE(review): the redirection and trailing "&" apply to the local
+ # ssh process, not the remote command -- edgecore.log is written on
+ # this machine and edgecore's lifetime is tied to the backgrounded
+ # ssh session; confirm this is the intended lifecycle.
+ sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
+ nohup $PATH_OF_EDGECORE > edgecore.log 2>&1 &
+ echo "exec_edge ended"
+}
+
+apply_node_json()
+{
+ echo "apply_node_json started"
+ echo $(pwd)
+ # Register the edge node object (node.json) with the API server.
+ # NOTE(review): PATH_OF_KUBEEDGE from config_kubeedge begins with ":",
+ # so this cd fails (and aborts under the shebang's "-e") unless the
+ # config value is corrected -- verify the configured path.
+ cd ${PATH_OF_KUBEEDGE}
+ kubectl apply -f node.json
+ echo "apply_node_json ended"
+}
+
+
+
+# start
+source config_kubeedge
+initialize_k8s_cluster
+
+# sleep added for k8s kube-system pods to be up
+
+#sleep 240
+
+kubeedge_tar_untar
+
+generate_certs
+
+initialize_yaml
+
+cloudcore_start
+edge_modify
+exec_edge > /dev/null 2>&1
+
+apply_node_json
+
+echo "Kubeedge-v1.1.0 Installation------------------------------------------SUCCESS"
+echo "************************************************************************"
--- /dev/null
+{
+ "kind": "Node",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "edge-node",
+ "labels": {
+ "name": "edge-node",
+ "node-role.kubernetes.io/edge": ""
+ }
+ }
+}
--- /dev/null
+
+######################################################################
+# #
+# The script is to undo the changes on ELIOT Manager and ELIOT nodes #
+# done by setup.sh file. #
+# It uninstalls docker, kubernetes. #
+# It releases the port used. #
+# It deletes the files created for kubernetes in node machine #
+# Script is tested in Ubuntu 16.04 version. #
+######################################################################
+
+# constants
+# OS name (e.g. "Ubuntu") parsed from /etc/os-release.
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
+# Print what this rollback script does and its platform limitation.
+show_help()
+{
+ echo "This script will remove docker and its related files from the master and node machines"
+ echo "This script will remove kubeadm kubectl kubelet kubernetes from the master and node machines"
+ echo "The changes will be first executed on manager machine and then node machines."
+ echo "It will pick the node machine details from nodelist file"
+ echo "This file supports Linux- Ubuntu version only"
+}
+
+# Rollbacking the changes on ELIOT Manager Node
+# Purges docker and kubernetes packages, flushes iptables, frees the
+# kubelet port (10250), removes ~/.kube, then cascades the rollback to
+# every worker node via rollback_k8sworkers.
+rollback_k8smaster()
+{
+# NOTE(review): in "sudo yes y | apt-get purge ...", sudo applies to
+# "yes", not to apt-get; it works in the first branch only because we
+# are already root there -- consider "yes y | sudo apt-get purge ...".
+if [ "$(id -u)" = 0 ]; then
+ sudo apt-get install iptables
+ # Flush all firewall rules left behind by kube-proxy/CNI.
+ sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
+ sudo apt-get install ipvsadm
+ # Free the kubelet port.
+ sudo fuser -k -n tcp 10250
+ sudo yes y | apt-get purge -y docker-engine
+ sudo yes y | apt-get purge -y docker
+ sudo yes y | apt-get purge -y docker.io
+ sudo yes y | apt-get purge -y docker-ce
+ sudo yes y | apt-get purge -y docker-ce-cli
+ sudo yes y | groupdel docker
+ sudo yes y | kubeadm reset
+ sudo yes y | apt-get purge kubeadm
+ sudo yes y | apt-get purge kubectl
+ sudo yes y | apt-get purge kubelet
+ sudo yes y | apt-get purge kube*
+ sudo yes y | apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+ sudo yes y | apt-get autoremove
+ sudo yes y | apt-get autoclean
+else
+ # Non-root path: same teardown, minus iptables/group cleanup.
+ sudo fuser -k -n tcp 10250
+ sudo yes y | sudo apt-get purge -y docker-engine
+ sudo yes y | sudo apt-get purge -y docker
+ sudo yes y | sudo apt-get purge -y docker.io
+ sudo yes y | sudo apt-get purge -y docker-ce
+ sudo yes y | sudo apt-get purge -y docker-ce-cli
+ sudo yes y | sudo kubeadm reset
+ sudo yes y | sudo apt-get purge kubeadm
+ sudo yes y | sudo apt-get purge kubectl
+ sudo yes y | sudo apt-get purge kubelet
+ sudo yes y | sudo apt-get purge kube*
+ sudo yes y | sudo apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+fi
+
+# Cascade the rollback to every worker listed in nodelist.
+rollback_k8sworkers
+
+}
+
#Rollbacking the changes on ELIOT Worker Node
# Purges docker + kubernetes from every worker listed in ./nodelist
# (format per line: user|ip|password), via sshpass/ssh.
#
# Fixes over the original:
#  - the root test was '[ " $(id -u)" = 0]' (stray space inside the quotes and
#    a missing space before ']'), so it could never behave as intended;
#  - 'sudo yes y | cmd' ran 'yes' under sudo instead of auto-confirming 'cmd';
#    replaced with 'apt-get -y' / 'yes y | sudo cmd';
#  - INSTALL_IPVSADM / AUTO_REMOVE / AUTO_CLEAN were undefined in the non-root
#    branch, producing empty ssh commands.
# A single command set now serves both root and non-root callers ('sudo' is a
# no-op for root), so the branch is gone and the interface is unchanged.
rollback_k8sworkers()
{
INSTALL_IPVSADM="sudo apt-get install -y ipvsadm"
RESET_PORT="sudo fuser -k -n tcp 10250"
REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
REMOVE_DOCKER1="sudo apt-get purge -y docker-engine"
REMOVE_DOCKER2="sudo apt-get purge -y docker"
REMOVE_DOCKER3="sudo apt-get purge -y docker.io"
REMOVE_DOCKER4="sudo apt-get purge -y docker-ce"
REMOVE_DOCKER5="sudo apt-get purge -y docker-ce-cli"
REMOVE_DOCKER6="yes y | sudo groupdel docker"
RESET_KUBEADM="yes y | sudo kubeadm reset"
REMOVE_KUBE_FILES1="sudo apt-get purge -y kubeadm"
REMOVE_KUBE_FILES2="sudo apt-get purge -y kubectl"
REMOVE_KUBE_FILES3="sudo apt-get purge -y kubelet"
REMOVE_KUBE_FILES4="sudo apt-get purge -y kube*"
REMOVE_KUBE_FILES5="sudo apt-get purge -y kubernetes-cni"
REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
AUTO_REMOVE="sudo apt-get autoremove -y"
AUTO_CLEAN="sudo apt-get autoclean"

#Read all the Worker Node details from nodelist file.
 while read line
 do
 nodeinfo="${line}"
 # Split the pipe-separated record into user / ip / password.
 nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
 nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
 nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
 # '< /dev/null' keeps ssh from draining the loop's stdin (the nodelist file).
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER1} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER2} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER3} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER4} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER5} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER6} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES1} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES2} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES3} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES4} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES5} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES6} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_REMOVE} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_CLEAN} < /dev/null
 done < nodelist > /dev/null 2>&1

}
+
verify_reset_status()
{
# Report completion of the rollback flow (prints a fixed success message).
printf '%s\n' "Success!!"
}
+
# Entry point: show usage on --help/-h, otherwise run the rollback on Ubuntu.
# "${1:-}" keeps the test valid when no argument is given (the original's
# unquoted $1 made '[' fail with a syntax error in that case).
if [ "${1:-}" == "--help" ] || [ "${1:-}" == "-h" ];
then
 show_help
 exit 0
fi

if [[ $OSPLATFORM = *Ubuntu* ]]; then
 rollback_k8smaster
 verify_reset_status
else
 echo "Script only supports Ubuntu Version."
fi
--- /dev/null
+########################################################################################
+# #
+# The script is to reset the settings on ELIOT Manager and ELIOT nodes #
+# before running the setup.sh file again on the same setup. #
+# It resets the settings of kubeadm and restarts its service #
+# It releases the ports used. #
+# It deletes the files created for kubernetes on node machine #
+# Script is tested in Ubuntu 16.04 version. #
+########################################################################################
+
+# constants
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
# Describe what the reset script does and where it finds the node inventory.
show_help()
{
  printf '%s\n' \
    "The script is to reset the settings on ELIOT Manager and ELIOT nodes which " \
    "needs to be done before executing the setup.sh file again." \
    "The changes will be first executed on manager machine and then on the node machines." \
    "It will pick the node machine details from nodelist file"
}
+
# Resetting ELIOT Manager Node
# Resets kubeadm state, flushes iptables, reinstalls helpers and frees the
# kubelet port, then resets every worker.
reset_k8smaster()
{
 # 'yes' must feed kubeadm's confirmation prompt; the original 'sudo yes y |
 # kubeadm reset' ran 'yes' under sudo and left kubeadm unprivileged.
 yes y | sudo kubeadm reset
 # '-y' keeps apt-get non-interactive inside a script.
 sudo apt-get install -y iptables
 # Every iptables call needs sudo — previously only the first one had it, so
 # the nat/mangle flushes and '-X' failed for non-root users.
 sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
 sudo apt-get install -y ipvsadm
 sudo systemctl restart kubelet
 # Free the kubelet port.
 sudo fuser -k -n tcp 10250

reset_k8sworkers
}
+
#Resetting ELIOT Worker Node
# Runs the reset sequence on every worker listed in ./nodelist (one
# user|ip|password record per line) over sshpass/ssh.
reset_k8sworkers()
{
# Remote command strings; unquoted expansion below lets the REMOTE shell
# interpret the pipe in RESET_KUBEADM.
RESET_KUBEADM="sudo yes y | kubeadm reset"
INSTALL_IPVSADM="sudo apt-get install ipvsadm"
RESTART_KUBELET="sudo systemctl restart kubelet"
RESET_PORT="sudo fuser -k -n tcp 10250"
#REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
# NOTE(review): container name is hard-coded; cadvisorsetup.sh names the
# container cadvisor-$(hostname) — confirm these stay in sync.
REMOVE_CADVISOR_FILES="docker rm cadvisor-iot-node1"

#Read all the Worker Node details from nodelist file.
 while read line
 do
 nodeinfo="${line}"
 # Split the pipe-separated record into user / ip / password.
 nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
 nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
 nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
 # '< /dev/null' keeps ssh from draining the loop's stdin (the nodelist file).
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESTART_KUBELET} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
 sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_CADVISOR_FILES} < /dev/null
 done < nodelist > /dev/null 2>&1
}
+
verify_reset_status()
{
# Report completion of the reset flow (prints a fixed success message).
printf '%s\n' "Success!!"
}
+
# Entry point: show usage on --help/-h, otherwise run the reset on Ubuntu.
# "${1:-}" keeps the test valid when no argument is given (the original's
# unquoted $1 made '[' fail with a syntax error in that case).
if [ "${1:-}" == "--help" ] || [ "${1:-}" == "-h" ];
then
 show_help
 exit 0
fi

if [[ $OSPLATFORM = *Ubuntu* ]]; then
 reset_k8smaster
 verify_reset_status
else
 echo "The script supports only Linux - Ubuntu"
fi
--- /dev/null
+<eliotedgenodeusername>|<eliotedgenodeip>|<eliotedgenodepassword>
--- /dev/null
# CentOS 7 build image for the protocol-opcua-c stack.
FROM centos:centos7
# ./work is populated by the host-side build script beforehand (cmake
# tarball, scons rpm, get-pip.py, protocol-opcua-c checkout).
ADD ./work/ /root/work
RUN yum install -y gcc git
RUN yum groupinstall -y 'Development Tools'
# Install scons and pip, build and install CMake 3.15.2 from source, then
# build the OPC-UA server/client in a single layer.
RUN cd /root/work && ls -al && tar xzf cmake-3.15.2.tar.gz && \
 rpm -Uvh scons-2.3.0-1.el7.centos.noarch.rpm && \
 python get-pip.py && \
 cd cmake-3.15.2 && ./bootstrap && \
 make && \
 make install && \
 cd ../protocol-opcua-c/ && \
 ./build.sh
--- /dev/null
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
set -o errexit

# set the docker name and docker tag when you build
DOCKER_NAME=
DOCKER_TAG=

# Resolve the script's own directory so relative paths work from any CWD.
export ELIOT_DIR=$(cd "$(dirname "$0")"; pwd)
export WORK_DIR=$ELIOT_DIR/work
export CMAKE_URL=https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2.tar.gz
export SCONS_PPA_URL=http://repo.okay.com.mx/centos/7/x86_64/release//scons-2.3.0-1.el7.centos.noarch.rpm
export GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
export OPCUA_REPO=https://github.com/edgexfoundry-holding/protocol-opcua-c.git
export DOCKER_NAME=${DOCKER_NAME:-"eliot/opc-ua"}
export DOCKER_TAG=${DOCKER_TAG:-"latest"}


# ':?' aborts if WORK_DIR is ever empty, so this can never expand to 'rm -rf /'.
rm -rf "${WORK_DIR:?}"
mkdir -p "$WORK_DIR"

# Fetch the build inputs the Dockerfile expects under ./work.
cd "$WORK_DIR"
wget "$CMAKE_URL"
wget "$SCONS_PPA_URL"
wget "$GET_PIP_URL"
git clone "$OPCUA_REPO"

cd "$ELIOT_DIR"
docker build ./ -t "$DOCKER_NAME:$DOCKER_TAG"
--- /dev/null
+#!/bin/bash
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
set -o errexit

# set the docker name and docker tag when you build
# export DOCKER_NAME=eliot/opc-ua
# export DOCKER_TAG=latest

# Resolve the script's own directory so relative paths work from any CWD.
export ELIOT_DIR=$(cd "$(dirname "$0")"; pwd)
export WORK_DIR=$ELIOT_DIR/work
export CMAKE_URL=https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2.tar.gz
export SCONS_PPA_URL=http://repo.okay.com.mx/centos/7/x86_64/release//scons-2.3.0-1.el7.centos.noarch.rpm
export GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
export OPCUA_REPO=https://github.com/edgexfoundry-holding/protocol-opcua-c.git
export DOCKER_NAME=${DOCKER_NAME:-"eliot/opc-ua"}
export DOCKER_TAG=${DOCKER_TAG:-"latest"}

# Clean and Create the work directory
# ':?' aborts if WORK_DIR is ever empty, so this can never expand to 'rm -rf /'.
rm -rf "${WORK_DIR:?}"
mkdir -p "$WORK_DIR"

yum install -y gcc git wget
yum groupinstall -y 'Development Tools'
# Get the package and source code
cd "$WORK_DIR"
wget "$CMAKE_URL"
wget "$SCONS_PPA_URL"
wget "$GET_PIP_URL"
git clone "$OPCUA_REPO"

# Install Package
rpm -Uvh scons-2.3.0-1.el7.centos.noarch.rpm
python get-pip.py

# Build and Install cmake
tar xzf cmake-3.15.2.tar.gz
cd "${WORK_DIR}/cmake-3.15.2"
./bootstrap
make
make install

# Build the opc-ua server and client
cd "${WORK_DIR}/protocol-opcua-c/"
./build.sh

set +x
echo "####################################################"
echo "# If you want to start the server, follow below steps"
echo "# cd ${WORK_DIR}/protocol-opcua-c/example/out"
echo "# ./server"
echo "####################################################"
+
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# start
# Host/container ports for the Prometheus UI (9090 -> 9090).
PROMETHEUS_HOST_PORT="9090"
PROMETHEUS_CONTAINTER_PORT="9090"
#cp ci_management/prometheus.yml $HOME

echo "**********************************************************************"
echo "Prometheus setup ----------------------------------------------STARTED"


# Sourced so it runs in this shell; presumably it generates ~/prometheus.yml
# used by the mount below — confirm against generatePromeyml.sh.
source generatePromeyml.sh
if [ ! -d "/etc/prometheus" ]; then
 sudo mkdir /etc/prometheus
fi

# NOTE(review): the container bind-mounts ~/prometheus.yml directly; the host
# /etc/prometheus directory created above is not referenced again in this
# script — confirm it is still needed.
sudo docker run -p ${PROMETHEUS_HOST_PORT}:${PROMETHEUS_CONTAINTER_PORT} \
 -v ~/prometheus.yml:/etc/prometheus/prometheus.yml \
 -d prom/prometheus \
 --config.file=/etc/prometheus/prometheus.yml

echo "Prometheus setup ----------------------------------------------SUCCESS"
echo "**********************************************************************"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+########################################################################################
+# #
+# The script is to setup the ELIOT Manager and ELIOT nodes. #
+# It installs Docker in both ELIOT Manager and ELIOT node. #
+# It installs Kubernetes. In the ELIOT Manager kubeadm, kubelet, kubectl is installed. #
+# In ELIOT Edge Node it will install kubeadn, kubelet. #
+# Script is tested in Ubuntu 16.04 version. #
+# sshpass needs to be installed before executing this script. #
+########################################################################################
+
+echo "**********************************************************************"
+echo "ELIOT IOT-Gateway Platform Deployment--------------------------STARTED"
+
+# constants
+
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+ELIOT_REPO="https://gerrit.akraino.org/r/eliot"
+
# Print usage for the IOT-Gateway setup script.
# Fixed user-facing typos in the original messages: Toplogy->Topology,
# Infrastrucutre->Infrastructure, Workder->Worker,
# EliotNodePasswor->EliotNodePassword.
show_help()
{
  echo "The script helps in setting up the ELIOT Topology Infrastructure"
  echo "The setup installs Docker, K8S Master and K8S worker nodes in "
  echo "ELIOT Manager and ELIOT Worker Nodes respectively "
  echo "After completion of script execution execute command: "
  echo "kubectl get nodes to check whether the connection between "
  echo "ELIOT Manager and ELIOT Nodes are established"
  echo ""
  echo "Nodelist file should have the details of Worker Nodes in the format of:"
  echo "EliotNodeUserName|EliotNodeIP|EliotNodePassword"
  echo "Each line should have detail of one ELIOT Node only"
}
+
# Setting up ELIOT Manager Node.
# Installing Docker, K8S and Initializing K8S Master
# Sources the helper scripts (so their environment persists in this shell)
# and mirrors their output into log files, then sets up the workers.
setup_k8smaster()
{
  #set -o xtrace
  # Drop any stale kubectl credentials from a previous run.
  sudo rm -rf ~/.kube
  source common.sh | tee eliotcommon.log
  source k8smaster.sh | tee kubeadm.log
  # Setup ELIOT Node
  setup_k8sworkers
}
+
# Install Docker/K8S on every worker in ./nodelist and join each one to the
# cluster using a freshly minted kubeadm token.
setup_k8sworkers()
{
  set -o xtrace

  # Install Docker on ELIOT Node
  # Remote command strings, executed verbatim by the remote shell.
  SETUP_WORKER_COMMON="sudo rm -rf ~/eliot &&\
  git clone ${ELIOT_REPO} &&\
  cd eliot/blueprints/iotgateway/scripts/ && source common.sh"
  #SETUP_WORKER_COMMON="cd eliot/scripts/ && source common.sh"
  SETUP_WORKER="cd eliot/blueprints/iotgateway/scripts/ && source k8sworker.sh"

  # 'kubeadm token create --print-join-command' emits the full join command.
  KUBEADM_TOKEN=$(kubeadm token create --print-join-command)
  KUBEADM_JOIN="sudo ${KUBEADM_TOKEN}"

  # Read all the Worker Node details from nodelist file.
  # Format per line: user|ip|password.
  while read line
  do
    nodeinfo="${line}"
    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
    # '< /dev/null' keeps ssh from draining the loop's stdin (nodelist).
    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON} < /dev/null
    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER} < /dev/null
    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN} < /dev/null
  done < nodelist > /dev/null 2>&1

}
+
# CentOS flavor of the master setup: sources the CentOS helper scripts,
# sets up the workers, then installs the Calico CNI.
setup_k8smaster_centos()
{
  set -o xtrace
  # Drop any stale kubectl credentials from a previous run.
  sudo rm -rf ~/.kube
  source common_centos.sh | tee eliotcommon_centos.log
  source k8smaster_centos.sh | tee kubeadm_centos.log

  # Setup ELIOT Node
  setup_k8sworkers_centos

  kubectl apply -f cni/calico/v38/calico.yaml

}
+
+
# CentOS flavor of the worker setup: install prerequisites on every node in
# ./nodelist and join each to the cluster.
# Fixed: the variable assignments in the loop carried leftover '< /dev/null'
# (and one '2>&1') redirections — meaningless on assignments — now removed.
setup_k8sworkers_centos()
{
  set -o xtrace
  # Install Docker on ELIOT Node

  SETUP_WORKER_COMMON_CENTOS="sudo rm -rf ~/eliot &&\
  git clone ${ELIOT_REPO} &&\
  cd eliot/blueprints/iotgateway/scripts/ && source common_centos.sh"

  # SETUP_WORKER_COMMON_CENTOS="cd /root/eliot/scripts/ && source common_centos.sh"

  KUBEADM_TOKEN=$(sudo kubeadm token create --print-join-command)
  KUBEADM_JOIN_CENTOS="sudo ${KUBEADM_TOKEN}"
  # Read all the Worker Node details from nodelist file.
  while read line
  do
    nodeinfo="${line}"
    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
    # '< /dev/null' on ssh keeps it from consuming the nodelist stream.
    sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON_CENTOS} < /dev/null
    sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN_CENTOS} < /dev/null
  done < nodelist > /dev/null 2>&1

}
+
# verify kubernetes setup by deploying nginx server.

# Sources verifyk8s.sh (nginx smoke test) and mirrors its output to a log.
verify_k8s_status(){
  set -o xtrace
  source verifyk8s.sh | tee verifyk8s.log
}
+
+
# Sources the EdgeX deployment helper. Note: this leaves the CWD inside
# ./edgex for the rest of the shell.
install_edgex(){
  set -o xtrace
  cd edgex && source edgexonk8s.sh
}
+
# verify installation of edgex platform
# Sources verifyedgex.sh and mirrors its output to a log file.
verify_edgex()
{
  set -o xtrace
  source verifyedgex.sh | tee verifyedgex.log

}
+
+
# Runs cadvisorsetup.sh on every worker in ./nodelist over sshpass/ssh.
install_cadvisor_edge(){
  set -o xtrace
  SETUP_CADVISOR_ATEDGE="cd eliot/blueprints/iotgateway/scripts/ && source cadvisorsetup.sh"
  # One user|ip|password record per nodelist line.
  while read line
  do
    nodeinfo="${line}"
    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
    # '< /dev/null' keeps ssh from draining the loop's stdin (nodelist).
    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_CADVISOR_ATEDGE} < /dev/null
  done < nodelist > /dev/null 2>&1
  echo "CADVISOR Installed in all the ELIOT IOT-GATEWAY Nodes"
}
+
# Sources prometheus.sh on the manager node and mirrors output to a log.
install_prometheus(){
  set -o xtrace
  source prometheus.sh | tee install_prometheus.log
  echo "Prometheus deployed successfully on ELIOT Manager Node and integrated with CAdvisor running on IOT-Gateway Nodes "
}
+
# Runs the OPC-UA install script on every worker in ./nodelist (CentOS path).
install_opcua_centos(){
  set -o xtrace
  INSTALL_OPCUA_ATEDGE="cd eliot/blueprints/iotgateway/scripts/opc-ua/ && source install.sh"
  while read line
  do
    nodeinfo="${line}"
    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
    # '< /dev/null' keeps ssh from draining the loop's stdin (nodelist).
    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_OPCUA_ATEDGE} < /dev/null
  done < nodelist > /dev/null 2>&1
  echo " OPC-UA Server and Client are successfully Deployed on all IOT-Gateway Nodes"
}
+
# Start
#

# Show usage on --help/-h. "${1:-}" keeps the test valid with no arguments
# (the original's unquoted $1 made '[' fail with a syntax error).
if [ "${1:-}" == "--help" ] || [ "${1:-}" == "-h" ];
then
  show_help
  exit 0
fi

# Remember where we started: install_edgex changes the CWD.
setupPath=$(pwd)

if [[ $OSPLATFORM = *CentOS* ]]; then
  setup_k8smaster_centos
else
  setup_k8smaster
fi

sleep 20
verify_k8s_status
install_cadvisor_edge
sleep 10
install_prometheus
sleep 5
sudo docker ps | grep prometheus

install_edgex
sleep 20
verify_edgex

# Installing OPC-UA on IOT Gateway Node

cd "${setupPath}"
if [[ $OSPLATFORM = *CentOS* ]]; then
  install_opcua_centos
fi

# Removing the taint from master node
kubectl taint nodes --all node-role.kubernetes.io/master- || true

echo "**********************************************************************"
echo "ELIOT IOT-Gateway Platform Deployment--------------------------SUCCESS"
--- /dev/null
+# edge node user name
+EDGENODEUSR=""
+
+# edge node ip
+EDGENODEIP=""
+
+# edge node password
+EDGENODEPASSWORD=""
+
+# master node user name
+MASTERNODEUSR=""
+
+# master node ip
+MASTERNODEIP=""
+
+# master node password
+MASTERNODEPASSWORD=""
+
+# eliot source code path including eliot folder
+PATH_OF_ELIOTFOLDER=""
+
+# home path of edge node
+HOME_EDGENODE=""
+
+# edgenode id for kubeedge configuration
+EDGENODEID=""
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
#Constants
# Kubeedge checkout location and the keadm build directory under GOPATH.
KUBEEDGE_SRC="$GOPATH/src/github.com/kubeedge/kubeedge"
KUBEEDGE_BIN="$GOPATH/src/github.com/kubeedge/kubeedge/keadm"
VERIFY_K8S="$PATH_OF_ELIOTFOLDER/scripts/verifyk8s.sh"

# Silence the tracing enabled by the '-ex' shebang; the braces + redirection
# also hide the 'set +x' line itself.
{ set +x; } > /dev/null 2>&1

# Any argument other than --help is rejected with a usage hint.
if [ -n "$1" ]; then

if [ "$1" != "--help" ]; then
 echo ""
 echo "Usage of the command is wrong.. Please type ./kubeedge_setup.sh --help for more details"
 echo ""
 exit 0
fi

fi

if [ "$1" == "--help" ]; then
 echo ""
 echo "This script will setup the kubeedge installation on Eliot master and Eliot edge"
 echo "Before Executing this, add Eliot master and Eliot edge details in config_kubeedge file"
 echo ""
 # NOTE(review): 'set -x' after 'exit 0' is unreachable dead code.
 exit 0; set -x;
fi
+
# take_keedge will download the source code of kubeedge in master and in edge

# Clones kubeedge under $KUBEEDGE_SRC and builds keadm; ~/.profile is sourced
# first so GOPATH (and Go toolchain paths) are set in this shell.
take_keedge(){

 source ~/.profile
 git clone https://github.com/kubeedge/kubeedge.git \
 $KUBEEDGE_SRC
 cd $KUBEEDGE_BIN
 make
}
+
+source config_kubeedge > /dev/null 2>&1
+
+common_steps="echo $GOPATH && \
+git clone https://github.com/kubeedge/kubeedge.git $KUBEEDGE_SRC && \
+source ~/.profile && \
+cd $GOPATH/src && \
+sudo chmod -R 777 github.com && \
+cd $KUBEEDGE_BIN && \
+make"
+
+edge_start="cd $KUBEEDGE_BIN && \
+sudo chmod +x keadm && \
+sudo ./keadm join --edgecontrollerip=$MASTERNODEIP --edgenodeid=$EDGENODEID \
+--k8sserverip=$MASTERNODEIP:8080"
+
# Initialisation of ELIOT master with kubeedge

# Runs 'keadm init' from the freshly built keadm directory.
execute_keedge_controller(){
 cd $KUBEEDGE_BIN
 sudo chmod +x keadm
 sudo ./keadm init
}
+
# Initialisation of Eliot edge with kubeedge

# Pushes the config to the edge node, builds keadm there, copies the master's
# certificates over, and finally joins the edge to the master.
exec_edge(){

 cd $PATH_OF_ELIOTFOLDER/scripts/src

 # Ship the shared configuration to the edge node's home directory.
 sshpass -p ${EDGENODEPASSWORD} \
 scp $PATH_OF_ELIOTFOLDER/scripts/src/config_kubeedge \
 ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE

 sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
 source config_kubeedge

 source config_kubeedge > /dev/null 2>&1
 # Clone + build keadm remotely (see common_steps).
 sshpass -p ${EDGENODEPASSWORD} \
 ssh ${EDGENODEUSR}@${EDGENODEIP} ${common_steps}

 echo "After cloning the code in ELIOT edge node"
 # Copy the certificates generated by 'keadm init' to the edge node.
 sshpass -p ${EDGENODEPASSWORD} \
 scp /etc/kubeedge/certs.tgz ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE

 # NOTE(review): $HOME expands LOCALLY before ssh runs — assumes the local
 # and remote home paths match; confirm.
 sshpass -p ${EDGENODEPASSWORD} \
 ssh ${EDGENODEUSR}@${EDGENODEIP} \
 sudo tar -xvzf $HOME/certs.tgz --directory /etc/kubeedge

 # Join the edge node to the master (see edge_start).
 sshpass -p ${EDGENODEPASSWORD} \
 ssh ${EDGENODEUSR}@${EDGENODEIP} ${edge_start}
}
+
# start

source config_kubeedge > /dev/null 2>&1

# Build keadm locally, init the master, then bring up the edge node
# (edge-side output intentionally suppressed).
take_keedge

execute_keedge_controller

exec_edge > /dev/null 2>&1

sleep 10
sudo kubectl get nodes

# Make kubectl usable after setup: root exports KUBECONFIG via .profile,
# a regular user gets a private copy of admin.conf under ~/.kube.
if [ "$(id -u)" = 0 ]; then
 echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
tee -a "${HOME}/.profile"
 source "${HOME}/.profile"
else
 mkdir -p "${HOME}/.kube"
 sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
 sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
fi

# Smoke-test the cluster with the nginx deployment helper.
chmod +x $VERIFY_K8S
source $VERIFY_K8S
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# Write a minimal nginx Deployment manifest, apply it if not already present,
# and poll until the pod is Running (or give up after 10 tries).
NGINXDEP=~/testk8s-nginx.yaml

cat <<EOF > "${NGINXDEP}"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.15.12
        ports:
        - containerPort: 80
          hostPort: 80
EOF

#check if nginx is already deployed
if ! kubectl get pods | grep nginx; then
  kubectl create -f "${NGINXDEP}"
fi

#To check whether the deployment is succesesfull
retry=10
while [ "$retry" -gt 0 ]
do
  if [ 2 == "$(kubectl get pods | grep -c -e STATUS -e Running)" ]; then
    break
  fi
  # Plain assignment instead of '((retry-=1))': the arithmetic command
  # returns status 1 when the result is 0, which aborts the script under
  # the shebang's 'set -e' before the final check below can run.
  retry=$((retry - 1))
  sleep 10
done
[ "$retry" -gt 0 ] || exit 1
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
echo "***********************************************************************"
echo "cadvisor installation-------------------------------------------STARTED"

# $(hostname) replaces legacy backticks; quoted below so odd hostnames can't
# split the --name argument.
HOSTNAME=$(hostname)
# Run cAdvisor with read-only mounts of the host filesystems it samples;
# UI exposed on host port 8081 (container 8080).
sudo docker run \
  --volume=/:/rootfs:ro \
  --volume=/var/run:/var/run:ro \
  --volume=/sys:/sys:ro \
  --volume=/var/lib/docker/:/var/lib/docker:ro \
  --volume=/dev/disk/:/dev/disk:ro \
  --publish=8081:8080 \
  --detach=true \
  --name=cadvisor-"${HOSTNAME}" \
  google/cadvisor:latest

echo "cadvisor setup--------------------------------------------------SUCCESS"
echo "***********************************************************************"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# constants
+
+TESTYAML="testk8s-nginx.yaml"
+
+# start
+
+source ../src/config_kubeedge > /dev/null 2>&1
+cd
+kubectl delete -f $TESTYAML
+
# Orchestrates CI cleanup: runs cleanup_edge.sh on the edge node, then
# cleanup_master.sh locally, then cleanup_edge_final.sh on the edge node.
# Fixed: a stray 'f' line sat between the master cleanup and the second scp —
# it would have executed a nonexistent command 'f'.
exec_edge_master(){

 # Ship and run the first edge-side cleanup.
 sshpass -p ${EDGENODEPASSWORD} \
 scp ${PATH_OF_ELIOTFOLDER}/blueprints/uCPE/scripts/ci_management/cleanup_edge.sh \
 ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE

 sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
 source cleanup_edge.sh

 # Clean up the master locally.
 cd $PATH_OF_ELIOTFOLDER/blueprints/uCPE/scripts/ci_management
 source cleanup_master.sh

 # Ship and run the final edge-side cleanup.
 sshpass -p ${EDGENODEPASSWORD} \
 scp ${PATH_OF_ELIOTFOLDER}/blueprints/uCPE/scripts/ci_management/cleanup_edge_final.sh \
 ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE

 sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
 source cleanup_edge_final.sh

}

exec_edge_master > /dev/null 2>&1
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# constants

# NOTE(review): NGINX and KUBEPROXY are computed here but never referenced in
# this script (cleanup_edge_final.sh uses the same counts) — confirm whether
# they can be dropped.
NGINX=$(sudo docker ps | grep nginx | wc -l)
KUBEPROXY=$(sudo docker ps | grep k8s.gcr.io | wc -l)
CONSTZERO="0"

# start

source config_kubeedge > /dev/null 2>&1
source ~/.profile

cd

# Detach this edge node from the master before deleting local state.
if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
 cd $GOPATH/src/github.com/kubeedge/kubeedge/keadm
 ./keadm reset --k8sserverip $MASTERNODEIP:8080
fi

cd /etc/kubeedge

# Remove the certificates copied over during setup.
if [ -f "certs.tgz" ]; then
 sudo rm -rf certs.tgz
fi

if [ -d "/etc/kubeedge/ca" ]; then
 sudo rm -rf /etc/kubeedge/ca
fi

if [ -d "/etc/kubeedge/certs" ]; then
 sudo rm -rf /etc/kubeedge/certs
fi

# Remove the kubeedge source tree cloned under root's GOPATH.
if [ -d "/root/go/src" ]; then
 sudo rm -rf /root/go/src
fi

# stop binaries edge_core
cd /usr/local/bin

if [ -f "edge_core" ]; then
 sudo rm edge_core
fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# constants

# Count matching containers up front (original 'grep | wc -l' shape kept).
NGINX=$(sudo docker ps | grep nginx | wc -l)
KUBEPROXY=$(sudo docker ps | grep k8s.gcr.io | wc -l)
CONSTZERO="0"

# start
echo "nginx container stop"
if [ "$NGINX" != "$CONSTZERO" ]; then
  # The inner 'docker ps' must also run under sudo — without it a non-root
  # user gets no output (or an error) and 'docker kill' receives no argument.
  sudo docker kill $(sudo docker ps -q --filter ancestor=nginx:1.15.12 )
fi

echo "kubeproxy container stop"
if [ "$KUBEPROXY" != "$CONSTZERO" ]; then
  sudo docker kill $(sudo docker ps -q --filter ancestor=k8s.gcr.io/kube-proxy:v1.14.3 )
fi
echo "Finished"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# constants

TESTYAML="testk8s-nginx.yaml"
SUPERUSER="root"
value=$(whoami)

# start

# kubeedge reset internally undo the things done by ./kubeedge init

if [ -d "$GOPATH/src/github.com/kubeedge/kubeedge/keadm" ]; then
 cd $GOPATH/src/github.com/kubeedge/kubeedge/keadm
 ./keadm reset
fi

# delete the previously existing certificates

if [ -d "/etc/kubeedge/ca" ]; then
 sudo rm -rf /etc/kubeedge/ca
fi

if [ -d "/etc/kubeedge/certs" ]; then
 cd /etc/kubeedge
 sudo rm -rf certs
fi

cd /etc/kubeedge
if [ -f "certs.tgz" ]; then
 sudo rm certs.tgz
fi

# delete the kubeedge code

if [ -d "$GOPATH/src" ]; then
 cd $GOPATH
 sudo rm -rf src
fi

# stop binaries edge_core edgecontroller

cd /usr/local/bin

if [ -f "edge_core" ]; then
 sudo rm edge_core
fi

if [ -f "edgecontroller" ]; then
 sudo rm edgecontroller
fi

# NOTE(review): 'sudo su' spawns a child shell here; the commands that follow
# still run in THIS shell as the original user once that child exits —
# confirm the intended behavior.
if [ $value != $SUPERUSER ]; then
 sudo su
fi

cd

# Remove the nginx smoke-test manifest from $HOME.
if [ -f $TESTYAML ]; then
 sudo rm $TESTYAML
fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
# The script is to stop and remove the prometheus and cadvisor containers from
# ELIOT Manager and ELIOT Edge Node respectively.

# stop prometheus in ELIOT Manager
source uninstall_prometheus.sh | tee uninstall_prometheus.log

#stop cadvisor statement executed at ELIOT Edge Node
stop_cadvisor_atedge="cd eliot/blueprints/uCPE/scripts/ci_management && source uninstall_cadvisor.sh"
# Read all the Worker Node details from nodelist file.
while read line
do
  nodeinfo="${line}"
  nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
  nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
  nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
  # '< /dev/null' added: without it ssh consumes the rest of the nodelist
  # stream, so only the first worker was ever cleaned up.
  sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${stop_cadvisor_atedge} < /dev/null
done < ../nodelist > /dev/null 2>&1
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
#stop cadvisor statement executed at ELIOT Edge Node
# 'grep -c' replaces the 'grep | wc -l' pipeline; the count is quoted so the
# numeric test stays well-formed.
if [ "$(sudo docker ps | grep -c cadvisor)" -gt 0 ]; then
  sudo docker stop $(sudo docker ps | grep cadvisor | awk '{ print $1 }')
fi

if [ "$(sudo docker ps -a | grep -c cadvisor)" -gt 0 ]; then
  sudo docker rm $(sudo docker ps -a | grep cadvisor | awk '{ print $1 }')
fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+
# stop prometheus in ELIOT Manager

# Collect the matching container ids once, then stop/remove only when any
# are present; messages and effects match the original flow.
running_ids=$(sudo docker ps | grep prometheus | awk '{ print $1 }')
if [ -n "$running_ids" ]; then
  echo "Stopping prometheus container id :- $running_ids"
  sudo docker stop $running_ids
fi
all_ids=$(sudo docker ps -a | grep prometheus | awk '{ print $1 }')
if [ -n "$all_ids" ]; then
  echo "Removing prometheus container id $all_ids"
  sudo docker rm $all_ids
fi
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+# This manifest includes the following component versions:
+# calico/node:v3.3.4
+# calico/cni:v3.3.4
+#
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # To enable Typha, set this to "calico-typha" *and*
+ # set a non-zero value for Typha replicas
+ # below. We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential.
+ typha_service_name: "none"
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "host-local",
+ "subnet": "usePodCidr"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+
+
+# This manifest creates a Service,
+# which will be backed by Calico's Typha daemon.
+# Typha sits in between Felix and the API server,
+# reducing Calico's load on the API server.
+
+apiVersion: v1
+kind: Service
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ ports:
+ - port: 5473
+ protocol: TCP
+ targetPort: calico-typha
+ name: calico-typha
+ selector:
+ k8s-app: calico-typha
+
+---
+
# This manifest creates a Deployment of Typha to back the above service.

# apps/v1beta1 was deprecated and removed for Deployment in Kubernetes 1.16;
# apps/v1 has been served since Kubernetes 1.9 and additionally requires an
# explicit spec.selector, added below.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: calico-typha
  namespace: kube-system
  labels:
    k8s-app: calico-typha
spec:
  # Required by apps/v1; must match the pod template labels.
  selector:
    matchLabels:
      k8s-app: calico-typha
+ # Number of Typha replicas.
+ # To enable Typha, set this to a non-zero value *and* set the
+ # typha_service_name variable in the calico-config ConfigMap above.
+ #
+ # We recommend using Typha if you have more than 50 nodes.
+ # Above 100 nodes it is essential
+ # (when using the Kubernetes datastore).
+ # Use one replica for every 100-200 nodes. In production,
+ # we recommend running at least 3 replicas to reduce the
+ # impact of rolling upgrade.
+ replicas: 0
+ revisionHistoryLimit: 2
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-typha
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical
+ # add-on, ensuring it gets priority scheduling
+ # and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ cluster-autoscaler.kubernetes.io/safe-to-evict: 'true'
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Since Calico can't network a pod until Typha is up,
+ # we need to run Typha itself as a host-networked pod.
+ serviceAccountName: calico-node
+ containers:
+ - image: calico/typha:v3.3.4
+ name: calico-typha
+ ports:
+ - containerPort: 5473
+ name: calico-typha
+ protocol: TCP
+ env:
+ # Enable "info" logging by default.
+ # Can be set to "debug" to increase verbosity.
+ - name: TYPHA_LOGSEVERITYSCREEN
+ value: "info"
+ # Disable logging to file and syslog
+ # since those don't make sense in K8s.
+ - name: TYPHA_LOGFILEPATH
+ value: "none"
+ - name: TYPHA_LOGSEVERITYSYS
+ value: "none"
+ # Monitor the Kubernetes API to find the number of running instances
+ # and rebalance connections.
+ - name: TYPHA_CONNECTIONREBALANCINGMODE
+ value: "kubernetes"
+ - name: TYPHA_DATASTORETYPE
+ value: "kubernetes"
+ - name: TYPHA_HEALTHENABLED
+ value: "true"
+ # Uncomment these lines to enable prometheus metrics.
+ # Since Typha is host-networked,
+ # this opens a port on the host, which may need to be secured.
+ # - name: TYPHA_PROMETHEUSMETRICSENABLED
+ # value: "true"
+ # - name: TYPHA_PROMETHEUSMETRICSPORT
+ # value: "9093"
+ livenessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - liveness
+ periodSeconds: 30
+ initialDelaySeconds: 30
+ readinessProbe:
+ exec:
+ command:
+ - calico-typha
+ - check
+ - readiness
+ periodSeconds: 10
+---
+
# This manifest creates a Pod Disruption Budget
# for Typha, allowing the K8s Cluster Autoscaler to evict Typha pods
# while never taking more than one replica down at a time.
+
+apiVersion: policy/v1beta1
+kind: PodDisruptionBudget
+metadata:
+ name: calico-typha
+ namespace: kube-system
+ labels:
+ k8s-app: calico-typha
+spec:
+ maxUnavailable: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-typha
+
+---
+
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
# extensions/v1beta1 was removed for DaemonSet in Kubernetes 1.16;
# apps/v1 has been served since 1.9, and the spec.selector it requires
# is already declared in this manifest.
apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion;
+ # tell Kubernetes to do a "force deletion"
+ # https://kubernetes.io/docs/concepts
+ # /workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.3.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Typha support: controlled by the ConfigMap.
+ - name: FELIX_TYPHAK8SSERVICENAME
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: typha_service_name
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists.
+ # Pod IPs will be chosen from this range.
+ # Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.3.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+
+# Create all the CustomResourceDefinitions needed for
+# Calico policy and networking mode.
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
--- /dev/null
+# Calico Version v3.3.4
+# https://docs.projectcalico.org/v3.3/releases#v3.3.4
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups: [""]
+ resources:
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: [""]
+ resources:
+ - services
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - update
+ - watch
+ - apiGroups: ["extensions"]
+ resources:
+ - networkpolicies
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - create
+ - get
+ - list
+ - update
+ - watch
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+ - kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
--- /dev/null
+# yamllint disable
+---
+# Source: calico/templates/calico-config.yaml
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Typha is disabled.
+ typha_service_name: "none"
+ # Configure the backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.1",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "datastore_type": "kubernetes",
+ "nodename": "__KUBERNETES_NODE_NAME__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+# Source: calico/templates/kdd-crds.yaml
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: felixconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: FelixConfiguration
+ plural: felixconfigurations
+ singular: felixconfiguration
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamblocks.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMBlock
+ plural: ipamblocks
+ singular: ipamblock
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: blockaffinities.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BlockAffinity
+ plural: blockaffinities
+ singular: blockaffinity
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamhandles.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMHandle
+ plural: ipamhandles
+ singular: ipamhandle
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ipamconfigs.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPAMConfig
+ plural: ipamconfigs
+ singular: ipamconfig
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgppeers.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPPeer
+ plural: bgppeers
+ singular: bgppeer
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: bgpconfigurations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: BGPConfiguration
+ plural: bgpconfigurations
+ singular: bgpconfiguration
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: ippools.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: IPPool
+ plural: ippools
+ singular: ippool
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: hostendpoints.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: HostEndpoint
+ plural: hostendpoints
+ singular: hostendpoint
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: clusterinformations.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: ClusterInformation
+ plural: clusterinformations
+ singular: clusterinformation
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworkpolicies.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkPolicy
+ plural: globalnetworkpolicies
+ singular: globalnetworkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: globalnetworksets.crd.projectcalico.org
+spec:
+ scope: Cluster
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: GlobalNetworkSet
+ plural: globalnetworksets
+ singular: globalnetworkset
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networkpolicies.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkPolicy
+ plural: networkpolicies
+ singular: networkpolicy
+
+---
+
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+ name: networksets.crd.projectcalico.org
+spec:
+ scope: Namespaced
+ group: crd.projectcalico.org
+ version: v1
+ names:
+ kind: NetworkSet
+ plural: networksets
+ singular: networkset
+---
+# Source: calico/templates/rbac.yaml
+
+# Include a clusterrole for the kube-controllers component,
+# and bind it to the calico-kube-controllers serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+rules:
+ # Nodes are watched to monitor for deletions.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - watch
+ - list
+ - get
+ # Pods are queried to check for existence.
+ - apiGroups: [""]
+ resources:
+ - pods
+ verbs:
+ - get
+ # IPAM resources are manipulated when nodes are deleted.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ verbs:
+ - list
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ # Needs access to update clusterinformations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - clusterinformations
+ verbs:
+ - get
+ - create
+ - update
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Include a clusterrole for the calico-node DaemonSet,
+# and bind it to the calico-node serviceaccount.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: calico-node
+rules:
+ # The CNI plugin needs to get pods, nodes, and namespaces.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - endpoints
+ - services
+ verbs:
+ # Used to discover service IPs for advertisement.
+ - watch
+ - list
+ # Used to discover Typhas.
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ # Needed for clearing NodeNetworkUnavailable flag.
+ - patch
+ # Calico stores some configuration information in node annotations.
+ - update
+ # Watch for changes to Kubernetes NetworkPolicies.
+ - apiGroups: ["networking.k8s.io"]
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+ # Used by Calico for policy information.
+ - apiGroups: [""]
+ resources:
+ - pods
+ - namespaces
+ - serviceaccounts
+ verbs:
+ - list
+ - watch
+ # The CNI plugin patches pods/status.
+ - apiGroups: [""]
+ resources:
+ - pods/status
+ verbs:
+ - patch
+ # Calico monitors various CRDs for config.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - globalfelixconfigs
+ - felixconfigurations
+ - bgppeers
+ - globalbgpconfigs
+ - bgpconfigurations
+ - ippools
+ - ipamblocks
+ - globalnetworkpolicies
+ - globalnetworksets
+ - networkpolicies
+ - networksets
+ - clusterinformations
+ - hostendpoints
+ verbs:
+ - get
+ - list
+ - watch
+ # Calico must create and update some CRDs on startup.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ippools
+ - felixconfigurations
+ - clusterinformations
+ verbs:
+ - create
+ - update
+ # Calico stores some configuration information on the node.
+ - apiGroups: [""]
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
  # These permissions are only required for upgrade from v2.6, and can
  # be removed after upgrade or on fresh installations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - bgpconfigurations
+ - bgppeers
+ verbs:
+ - create
+ - update
+ # These permissions are required for Calico CNI to perform IPAM allocations.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ - ipamblocks
+ - ipamhandles
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - delete
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - ipamconfigs
+ verbs:
+ - get
+ # Block affinities must also be watchable by confd for route aggregation.
+ - apiGroups: ["crd.projectcalico.org"]
+ resources:
+ - blockaffinities
+ verbs:
+ - watch
+ # The Calico IPAM migration needs to get daemonsets. These permissions can be
+ # removed if not upgrading from an installation using host-local IPAM.
+ - apiGroups: ["apps"]
+ resources:
+ - daemonsets
+ verbs:
+ - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-node.yaml
+# This manifest installs the calico-node container, as well
+# as the CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ priorityClassName: system-node-critical
+ initContainers:
+ # This container performs upgrade from host-local IPAM to calico-ipam.
+ # It can be deleted if this is a fresh installation, or if you have already
+ # upgraded to use calico-ipam.
+ - name: upgrade-ipam
+ image: calico/cni:v3.8.4
+ command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
+ env:
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ volumeMounts:
+ - mountPath: /var/lib/cni/networks
+ name: host-local-net-dir
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ securityContext:
+ privileged: true
+ # This container installs the CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.8.4
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # Set the hostname based on the k8s node name.
+ - name: KUBERNETES_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # Prevents the container from sleeping forever.
+ - name: SLEEP
+ value: "false"
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ securityContext:
+ privileged: true
+ # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
+ # to communicate with Felix over the Policy Sync API.
+ - name: flexvol-driver
+ image: calico/pod2daemon-flexvol:v3.8.4
+ volumeMounts:
+ - name: flexvol-driver-host
+ mountPath: /host/driver
+ securityContext:
+ privileged: true
+ containers:
+ # Runs calico-node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.8.4
+ env:
+ # Use Kubernetes API as the backing datastore.
+ - name: DATASTORE_TYPE
+ value: "kubernetes"
+ # Wait for the datastore.
+ - name: WAIT_FOR_DATASTORE
+ value: "true"
+ # Set based on the k8s node name.
+ - name: NODENAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - name: policysync
+ mountPath: /var/run/nodeagent
+ volumes:
+ # Used by calico-node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the directory for host-local IPAM allocations. This is
+ # used when upgrading from host-local to calico-ipam, and can be removed
+ # if not using the upgrade-ipam init container.
+ - name: host-local-net-dir
+ hostPath:
+ path: /var/lib/cni/networks
+ # Used to create per-pod Unix Domain Sockets
+ - name: policysync
+ hostPath:
+ type: DirectoryOrCreate
+ path: /var/run/nodeagent
+ # Used to install Flex Volume Driver
+ - name: flexvol-driver-host
+ hostPath:
+ type: DirectoryOrCreate
+ path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+# Source: calico/templates/calico-kube-controllers.yaml
+
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ selector:
+ matchLabels:
+ k8s-app: calico-kube-controllers
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ priorityClassName: system-cluster-critical
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.8.4
+ env:
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: node
+ - name: DATASTORE_TYPE
+ value: kubernetes
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+
+---
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+---
+# Source: calico/templates/calico-etcd-secrets.yaml
+
+---
+# Source: calico/templates/calico-typha.yaml
+
+---
+# Source: calico/templates/configure-canal.yaml
+
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# This script is to install common software for ELIOT.
+# To be executed in Eliot Manager and Eliot Nodes.
+# Script will install Docker software.
+# Script has to be executed in Ubuntu 16.04.
+
+# Set Docker version
+DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
+
+# git is needed to clone the ELIOT repo on worker nodes; curl and
+# software-properties-common (provides add-apt-repository) are used below
+# but are not guaranteed on a minimal Ubuntu 16.04 install, so install them
+# here instead of assuming they exist.
+sudo apt-get update && sudo apt-get install -y git curl software-properties-common
+
+# Install Docker as Prerequisite
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+sudo apt-key fingerprint 0EBFCD88
+sudo add-apt-repository \
+  "deb https://download.docker.com/linux/ubuntu \
+  $(lsb_release -cs) \
+  stable"
+
+sudo apt update
+# Pin docker-ce to the version validated for this blueprint.
+sudo apt install -y docker-ce=${DOCKER_VERSION}
+
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# constants
+
+DOCKER_VERSION=18.09.6
+KUBE_VERSION=1.16.0-0
+MACHINE=$(uname -m)
+
+# start
+
+# This script will install docker, kubeadm on both Eliot Master and Edge nodes
+
+sudo sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' \
+/etc/sysconfig/selinux
+
+# Required so bridged pod traffic is visible to iptables.
+sudo modprobe br_netfilter
+_conf='/etc/sysctl.d/99-akraino-eliot.conf'
+echo 'net.bridge.bridge-nf-call-iptables = 1' |& sudo tee "${_conf}"
+sudo sysctl -q -p "${_conf}"
+
+#echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables
+
+# kubeadm requires swap off; needs root like every other system change here
+# (the original ran it unprivileged).
+sudo swapoff -a
+
+sudo yum install -y yum-utils device-mapper-persistent-data lvm2
+
+sudo yum-config-manager \
+--add-repo https://download.docker.com/linux/centos/docker-ce.repo
+
+# -y keeps the install non-interactive; without it the original prompted
+# and hung unattended runs.
+sudo yum install -y docker-ce-${DOCKER_VERSION} docker-ce-cli-${DOCKER_VERSION} \
+containerd.io
+
+# Kubernetes repository set
+
+cat <<-EOF | sudo tee /etc/yum.repos.d/kubernetes.repo
+[kubernetes]
+name=Kubernetes
+baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-${MACHINE}
+enabled=1
+gpgcheck=1
+repo_gpgcheck=1
+gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
+ https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
+EOF
+
+# Set SELinux in permissive mode (effectively disabling it)
+setenforce 0
+sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
+
+yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
+systemctl enable --now kubelet
+
+# Pin kubeadm to the validated version.
+sudo yum install -y kubeadm-${KUBE_VERSION}
+sudo systemctl start docker && sudo systemctl enable docker
+
+sudo systemctl daemon-reload
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Builds ~/prometheus.yml with one cadvisor scrape target per worker node
+# read from the 'nodelist' file.
+# NOTE(review): assumes each nodelist line is "<user>|<ip>|<password>" --
+# the IP is taken from field 2; confirm against the nodelist template.
+promyml=~/prometheus.yml
+workernodeip=""
+blank=""
+count=1
+firstline=1
+while read line
+do
+  # From the second line onwards prepend "','" so the accumulated value,
+  # once placed between the quotes in the heredoc below, forms a quoted
+  # YAML list: 'ip1:8081','ip2:8081',...
+  if [ $count -gt $firstline ]; then
+    workernodeip+="','"
+  fi
+  nodeinfo="${line}"
+  nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+  echo $nodeip
+  workernodeip+=$nodeip
+  # 8081 is the port cadvisor is published on by cadvisorsetup.sh --
+  # TODO confirm.
+  workernodeip+=":8081"
+  echo $workernodeip
+  count=2
+  echo $count
+done < nodelist > /dev/null 2>&1
+
+echo "workernodeip="
+echo $workernodeip
+
+# Emit the Prometheus configuration; $workernodeip expands inside the
+# heredoc (EOF is unquoted).
+cat <<EOF > "${promyml}"
+---
+global:
+  scrape_interval: 15s
+
+scrape_configs:
+  - job_name: 'prometheus'
+    scrape_interval: 5s
+    static_configs:
+      - targets: ['localhost:9090']
+
+  - job_name: cadvisor
+    scrape_interval: 5s
+    static_configs:
+      - targets: ['$workernodeip']
+EOF
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.16.0-00
+POD_NETWORK_CIDR=192.168.0.0/16
+K8S_CNI_VERSION=0.7.5-00
+
+#K8s service CIDR range
+K8s_SVC_CIDR=10.96.0.0/12
+
+# Install Kubernetes with Kubeadm
+
+# Disable swap
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+sudo apt install -y \
+  kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} \
+  kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+
+# Pin the packages so unattended upgrades cannot move the cluster version.
+sudo apt-mark hold kubelet kubeadm kubectl
+
+# Only bootstrap a new control plane when kubectl cannot reach one already.
+if ! kubectl get nodes; then
+  # First address reported by 'hostname -I' becomes the API server
+  # advertise address.
+  hostname -I > hostname.tmp
+  MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+  rm hostname.tmp
+  sudo kubeadm config images pull
+  sudo kubeadm init \
+  --apiserver-advertise-address="${MASTER_IP}" \
+  --pod-network-cidr="${POD_NETWORK_CIDR}" \
+  --service-cidr="${K8s_SVC_CIDR}"
+
+  # Root exports the admin kubeconfig via .profile; a regular user gets a
+  # private copy in ~/.kube/config.
+  if [ "$(id -u)" = 0 ]; then
+    KUBECONFIG=/etc/kubernetes/admin.conf
+    echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+    tee -a "${HOME}/.profile"
+    source "${HOME}/.profile"
+  else
+    mkdir -p "${HOME}/.kube"
+    sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+    sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+  fi
+  #kubectl apply -f "cni/calico/rbac.yaml"
+  # Install the Calico CNI so nodes can become Ready.
+  kubectl apply -f "cni/calico/v38/calico.yaml"
+
+fi
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# Installs the pinned Kubernetes packages on CentOS and initialises the
+# control plane with kubeadm.
+
+# constants
+
+POD_NETWORK_CIDR=192.168.0.0/16
+KUBE_VERSION=1.16.0-0
+KUBERNETES_CNI=0.7.5-0
+
+# start
+
+# First address from 'hostname -I' becomes the API server advertise address.
+hostname -I > hostname.tmp
+MASTER_IP="$(cut -d ' ' -f 1 hostname.tmp)"
+rm hostname.tmp
+
+# kubernetes installation
+
+sudo yum install -y kubelet-${KUBE_VERSION} kubectl-${KUBE_VERSION} \
+kubernetes-cni-${KUBERNETES_CNI}
+
+sudo systemctl daemon-reload
+sudo systemctl restart kubelet
+
+# Initialize kubernetes on master
+
+sudo kubeadm init \
+  --apiserver-advertise-address="${MASTER_IP}" \
+  --pod-network-cidr="${POD_NETWORK_CIDR}"
+
+# Give the invoking user a working kubeconfig.
+mkdir -p "${HOME}/.kube"
+sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+KUBE_VERSION=1.16.0-00
+K8S_CNI_VERSION=0.7.5-00
+
+# Install Kubernetes with Kubeadm
+# The script will be executed in Eliot Edge Node
+
+# kubeadm requires swap to be disabled.
+sudo swapoff -a
+sudo apt update
+sudo apt install -y apt-transport-https curl
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
+deb https://apt.kubernetes.io/ kubernetes-xenial main
+EOF
+
+sudo apt update
+# Worker nodes only need kubeadm (to join) and kubelet; kubectl is not
+# installed here.
+sudo apt install -y \
+  kubeadm=${KUBE_VERSION} kubelet=${KUBE_VERSION} kubernetes-cni=${K8S_CNI_VERSION}
+
+#sudo apt-mark hold kubelet kubeadm
--- /dev/null
+
+######################################################################
+# #
+# The script is to undo the changes on ELIOT Manager and ELIOT nodes #
+# done by setup.sh file. #
+# It uninstalls docker, kubernetes. #
+# It releases the port used. #
+# It deletes the files created for kubernetes in node machine #
+# Script is tested in Ubuntu 16.04 version. #
+######################################################################
+
+# constants
+# NAME= line of /etc/os-release, e.g. "Ubuntu" (value may be quoted on
+# some distributions).
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
+# Prints usage information for this uninstall script.
+show_help()
+{
+  echo "This script will remove docker and its related files from the master and node machines"
+  echo "This script will remove kubeadm kubectl kubelet kubernetes from the master and node machines"
+  echo "The changes will be first executed on manager machine and then node machines."
+  echo "It will pick the node machine details from nodelist file"
+  echo "This file supports Linux- Ubuntu version only"
+}
+
+# Rollbacking the changes on ELIOT Manager Node
+# Purges docker and kubernetes packages on the manager, then cascades the
+# rollback to all worker nodes via rollback_k8sworkers.
+# NOTE(review): in 'sudo yes y | apt-get ...' the sudo applies to 'yes',
+# not to apt-get; it happens to work because each branch already has the
+# needed privileges on apt-get itself, but 'yes | sudo apt-get ...' was
+# presumably intended -- confirm.
+rollback_k8smaster()
+{
+if [ "$(id -u)" = 0 ]; then
+  sudo apt-get install iptables
+  # Flush all firewall rules left behind by kube-proxy/CNI.
+  sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
+  sudo apt-get install ipvsadm
+  # Free the kubelet port.
+  sudo fuser -k -n tcp 10250
+  sudo yes y | apt-get purge -y docker-engine
+  sudo yes y | apt-get purge -y docker
+  sudo yes y | apt-get purge -y docker.io
+  sudo yes y | apt-get purge -y docker-ce
+  sudo yes y | apt-get purge -y docker-ce-cli
+  sudo yes y | groupdel docker
+  sudo yes y | kubeadm reset
+  sudo yes y | apt-get purge kubeadm
+  sudo yes y | apt-get purge kubectl
+  sudo yes y | apt-get purge kubelet
+  sudo yes y | apt-get purge kube*
+  sudo yes y | apt-get purge kubernetes-cni
+  sudo rm -rf ~/.kube
+  sudo yes y | apt-get autoremove
+  sudo yes y | apt-get autoclean
+else
+  sudo fuser -k -n tcp 10250
+  sudo yes y | sudo apt-get purge -y docker-engine
+  sudo yes y | sudo apt-get purge -y docker
+  sudo yes y | sudo apt-get purge -y docker.io
+  sudo yes y | sudo apt-get purge -y docker-ce
+  sudo yes y | sudo apt-get purge -y docker-ce-cli
+  sudo yes y | sudo kubeadm reset
+  sudo yes y | sudo apt-get purge kubeadm
+  sudo yes y | sudo apt-get purge kubectl
+  sudo yes y | sudo apt-get purge kubelet
+  sudo yes y | sudo apt-get purge kube*
+  sudo yes y | sudo apt-get purge kubernetes-cni
+  sudo rm -rf ~/.kube
+fi
+
+# Cascade the rollback to every worker node listed in 'nodelist'.
+rollback_k8sworkers
+
+}
+
+#Rollbacking the changes on ELIOT Worker Node
+rollback_k8sworkers()
+{
+if [ " $(id -u)" = 0]; then
+ INSTALL_IPVSADM="sudo apt-get install ipvsadm"
+ RESET_PORT="fuser -k -n tcp 10250"
+ #REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | groupdel docker"
+ RESET_KUBEADM="sudo yes y | kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | apt-get purge kubernetes-cni"
+ REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+ AUTO_REMOVE="sudo yes y | apt-get autoremove"
+ AUTO_CLEAN="sudo yes y | apt-get autoclean"
+else
+ RESET_PORT="fuser -k -n tcp 10250"
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | sudo apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | sudo apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | sudo apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | sudo apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | sudo apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | sudo groupdel docker"
+ RESET_KUBEADM="sudo yes y | sudo kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | sudo apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | sudo apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | sudo apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | sudo apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | sudo apt-get purge kubernetes-cni"
+ REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+fi
+
+#Read all the Worker Node details from nodelist file.
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER1} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER2} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER3} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER4} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER5} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_DOCKER6} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES1} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES2} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES3} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES4} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES5} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES6} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_REMOVE} < /dev/null
+ sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${AUTO_CLEAN} < /dev/null
+ done < nodelist > /dev/null 2>&1
+
+}
+
+# Placeholder: no real verification is performed yet.
+verify_reset_status()
+{
+echo "Success!!"
+}
+
+# Quote "$1" with a default so the test does not break when the script is
+# invoked without arguments (the original unquoted $1 made [ fail); use
+# portable '=' instead of '=='.
+if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ];
+then
+  show_help
+  exit 0
+fi
+
+if [[ $OSPLATFORM = *Ubuntu* ]]; then
+  rollback_k8smaster
+  verify_reset_status
+else
+  echo "Script only supports Ubuntu Version."
+fi
--- /dev/null
+########################################################################################
+# #
+# The script is to reset the settings on ELIOT Manager and ELIOT nodes #
+# before running the setup.sh file again on the same setup. #
+# It resets the settings of kubeadm and restarts its service #
+# It releases the ports used. #
+# It deletes the files created for kubernetes on node machine #
+# Script is tested in Ubuntu 16.04 version. #
+########################################################################################
+
+# constants
+# NAME= line of /etc/os-release, e.g. "Ubuntu".
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+
+# Prints usage information for this reset script.
+show_help()
+{
+  echo "The script is to reset the settings on ELIOT Manager and ELIOT nodes which "
+  echo "needs to be done before executing the setup.sh file again."
+  echo "The changes will be first executed on manager machine and then on the node machines."
+  echo "It will pick the node machine details from nodelist file"
+}
+
+# Resetting ELIOT Manager Node
+# Tears down the kubeadm state on the manager, then resets all workers.
+reset_k8smaster()
+{
+  # 'yes' feeds kubeadm's confirmation prompt; the original piped
+  # 'sudo yes y' into an UNprivileged 'kubeadm reset', so the reset ran
+  # without root. sudo belongs on kubeadm.
+  yes y | sudo kubeadm reset
+  # -y keeps the installs non-interactive; every iptables invocation in the
+  # chain needs sudo, not only the first one.
+  sudo apt-get install -y iptables
+  sudo iptables -F && sudo iptables -t nat -F && sudo iptables -t mangle -F && sudo iptables -X
+  sudo apt-get install -y ipvsadm
+  sudo systemctl restart kubelet
+  # Free the kubelet port.
+  sudo fuser -k -n tcp 10250
+
+reset_k8sworkers
+}
+
+#Resetting ELIOT Worker Node
+# Runs kubeadm reset and related cleanup on every worker in 'nodelist'
+# (format: <user>|<ip>|<password>) over ssh.
+reset_k8sworkers()
+{
+# NOTE(review): in RESET_KUBEADM the sudo applies to 'yes', not to
+# 'kubeadm reset' -- presumably 'yes | sudo kubeadm reset' was intended;
+# confirm on a live worker.
+RESET_KUBEADM="sudo yes y | kubeadm reset"
+INSTALL_IPVSADM="sudo apt-get install ipvsadm"
+RESTART_KUBELET="sudo systemctl restart kubelet"
+RESET_PORT="sudo fuser -k -n tcp 10250"
+#REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
+REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+# Container name matches the one started by cadvisorsetup.sh -- TODO confirm.
+REMOVE_CADVISOR_FILES="docker rm cadvisor-iot-node1"
+
+#Read all the Worker Node details from nodelist file.
+# '< /dev/null' prevents ssh from consuming the loop's stdin (nodelist).
+  while read line
+  do
+    nodeinfo="${line}"
+    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_KUBEADM} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${INSTALL_IPVSADM} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESTART_KUBELET} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${RESET_PORT} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_KUBE_FILES} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${REMOVE_CADVISOR_FILES} < /dev/null
+  done < nodelist > /dev/null 2>&1
+}
+
+# Placeholder: no real verification is performed yet.
+verify_reset_status()
+{
+echo "Success!!"
+}
+
+# Quote "$1" with a default so the test does not break when the script is
+# invoked without arguments (the original unquoted $1 made [ fail); use
+# portable '=' instead of '=='.
+if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ];
+then
+  show_help
+  exit 0
+fi
+
+if [[ $OSPLATFORM = *Ubuntu* ]]; then
+  reset_k8smaster
+  verify_reset_status
+else
+  echo "The script supports only Linux - Ubuntu"
+fi
--- /dev/null
+<eliotedgenodeusername>|<eliotedgenodeip>|<eliotedgenodepassword>
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+echo "**********************************************************************"
+echo "Prometheus setup ----------------------------------------------STARTED"
+
+# start
+# Host and container ports for the Prometheus web UI / API.
+PROMETHEUS_HOST_PORT="9090"
+PROMETHEUS_CONTAINTER_PORT="9090"
+#cp ci_management/prometheus.yml $HOME
+# generatePromeyml.sh writes ~/prometheus.yml with the cadvisor targets.
+source generatePromeyml.sh
+if [ ! -d "/etc/prometheus" ]; then
+  sudo mkdir /etc/prometheus
+fi
+
+# Run Prometheus in Docker, mounting the generated config at the path the
+# image expects.
+sudo docker run -p ${PROMETHEUS_HOST_PORT}:${PROMETHEUS_CONTAINTER_PORT} \
+  -v ~/prometheus.yml:/etc/prometheus/prometheus.yml \
+  -d prom/prometheus \
+  --config.file=/etc/prometheus/prometheus.yml
+
+echo "Prometheus setup ----------------------------------------------SUCCESS"
+echo "**********************************************************************"
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+########################################################################################
+# #
+# The script is to setup the ELIOT Manager and ELIOT nodes. #
+# It installs Docker in both ELIOT Manager and ELIOT node. #
+# It installs Kubernetes. In the ELIOT Manager kubeadm, kubelet, kubectl is installed. #
+# In ELIOT Edge Node it will install kubeadn, kubelet. #
+# Script is tested in Ubuntu 16.04 version. #
+# sshpass needs to be installed before executing this script. #
+########################################################################################
+
+echo "********************************************************************************"
+echo "ELIOT SD-WAN/WAN EDGE/uCPE Platform Deployment--------------------------STARTED"
+
+# constants
+
+OSPLATFORM=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
+ELIOT_REPO="https://gerrit.akraino.org/r/eliot"
+
+# Prints usage information.
+# Fixed user-facing typos: Topology, Infrastructure, Worker,
+# EliotNodePassword.
+show_help()
+{
+  echo "The script helps in setting up the ELIOT Topology Infrastructure"
+  echo "The setup installs Docker, K8S Master and K8S worker nodes in "
+  echo "ELIOT Manager and ELIOT Worker Nodes respectively "
+  echo "After completion of script execution execute command: "
+  echo "kubectl get nodes to check whether the connection between "
+  echo "ELIOT Manager and ELIOT Nodes are established"
+  echo ""
+  echo "Nodelist file should have the details of Worker Nodes in the format of:"
+  echo "EliotNodeUserName|EliotNodeIP|EliotNodePassword"
+  echo "Each line should have detail of one ELIOT Node only"
+}
+
+# Setting up ELIOT Manager Node.
+# Installing Docker, K8S and Initializing K8S Master
+setup_k8smaster()
+{
+  #set -o xtrace
+  # Drop any stale kubeconfig from a previous run before re-initialising.
+  sudo rm -rf ~/.kube
+  source common.sh | tee eliotcommon.log
+  source k8smaster.sh | tee kubeadm.log
+  # Setup ELIOT Node
+  setup_k8sworkers
+}
+
+# Installs Docker/K8S on every worker in 'nodelist' over ssh and joins each
+# one to the cluster.
+setup_k8sworkers()
+{
+  set -o xtrace
+
+  # Install Docker on ELIOT Node
+  # Remote command: fetch a fresh copy of the ELIOT repo and run common.sh.
+  SETUP_WORKER_COMMON="sudo rm -rf ~/eliot &&\
+  git clone ${ELIOT_REPO} &&\
+  cd eliot/blueprints/uCPE/scripts/ && source common.sh"
+  #SETUP_WORKER_COMMON="cd eliot/scripts/ && source common.sh"
+  SETUP_WORKER="cd eliot/blueprints/uCPE/scripts/ && source k8sworker.sh"
+
+  # 'kubeadm token create --print-join-command' emits a complete join
+  # command for the workers.
+  KUBEADM_TOKEN=$(kubeadm token create --print-join-command)
+  KUBEADM_JOIN="sudo ${KUBEADM_TOKEN}"
+
+  # Read all the Worker Node details from nodelist file.
+  # Each line: <user>|<ip>|<password>; '< /dev/null' keeps ssh from eating
+  # the loop's stdin.
+  while read line
+  do
+    nodeinfo="${line}"
+    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER} < /dev/null
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN} < /dev/null
+  done < nodelist > /dev/null 2>&1
+
+}
+
+# CentOS variant of the manager setup: installs Docker/K8S, initialises the
+# master, joins the workers and installs the Calico CNI.
+setup_k8smaster_centos()
+{
+  set -o xtrace
+  # Drop any stale kubeconfig from a previous run before re-initialising.
+  sudo rm -rf ~/.kube
+  source common_centos.sh | tee eliotcommon_centos.log
+  source k8smaster_centos.sh | tee kubeadm_centos.log
+
+  # Setup ELIOT Node
+  setup_k8sworkers_centos
+
+  # Install the Calico CNI on the freshly initialised cluster.
+  kubectl apply -f cni/calico/v38/calico.yaml
+
+}
+
+
+# CentOS variant of the worker setup: provisions each node from 'nodelist'
+# over ssh and joins it to the cluster.
+setup_k8sworkers_centos()
+{
+  set -o xtrace
+  # Install Docker on ELIOT Node
+
+  # Remote command: fetch a fresh ELIOT repo and run common_centos.sh.
+  SETUP_WORKER_COMMON_CENTOS="sudo rm -rf ~/eliot &&\
+  git clone ${ELIOT_REPO} &&\
+  cd eliot/blueprints/uCPE/scripts/ && source common_centos.sh"
+
+  # SETUP_WORKER_COMMON_CENTOS="cd /root/eliot/scripts/ && source common_centos.sh"
+
+  KUBEADM_TOKEN=$(sudo kubeadm token create --print-join-command)
+  KUBEADM_JOIN_CENTOS="sudo ${KUBEADM_TOKEN}"
+  # Read all the Worker Node details from nodelist file.
+  # NOTE(review): the '< /dev/null' redirections attached to the plain
+  # variable assignments below are no-ops -- presumably copy/paste
+  # leftovers from the ssh lines.
+  while read line
+  do
+    nodeinfo="${line}" < /dev/null 2>&1
+    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1) < /dev/null
+    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2) < /dev/null
+    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3) < /dev/null
+    sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_WORKER_COMMON_CENTOS} < /dev/null
+    sudo sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_JOIN_CENTOS} < /dev/null
+  done < nodelist > /dev/null 2>&1
+
+}
+
+# verify kubernetes setup by deploying nginx server.
+
+verify_k8s_status(){
+  set -o xtrace
+  source verifyk8s.sh | tee verifyk8s.log
+}
+
+# install_edgex method removed
+
+# Starts cadvisor on every worker node listed in 'nodelist' by sourcing
+# cadvisorsetup.sh remotely over ssh.
+install_cadvisor_edge(){
+  set -o xtrace
+  SETUP_CADVISOR_ATEDGE="cd eliot/blueprints/uCPE/scripts/ && source cadvisorsetup.sh"
+  while read line
+  do
+    nodeinfo="${line}"
+    nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+    nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+    nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+    sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${SETUP_CADVISOR_ATEDGE} < /dev/null
+  done < nodelist > /dev/null 2>&1
+}
+
+# Installs Prometheus on the manager node (see prometheus.sh).
+install_prometheus(){
+  set -o xtrace
+  source prometheus.sh | tee install_prometheus.log
+}
+
+# Start
+#
+
+if [ $1 == "--help" ] || [ $1 == "-h" ];
+then
+ show_help
+ exit 0
+fi
+
+if [[ $OSPLATFORM = *CentOS* ]]; then
+ setup_k8smaster_centos
+else
+ setup_k8smaster
+fi
+
+sleep 20
+verify_k8s_status
+install_cadvisor_edge
+sleep 10
+install_prometheus
+sleep 5
+sudo docker ps | grep prometheus
+
+# install_edgex removed
+
+sleep 20
+
+# Removing the taint from master node
+kubectl taint nodes --all node-role.kubernetes.io/master- || true
+
+echo "********************************************************************************"
+echo "ELIOT SD-WAN/WAN EDGE/uCPE Platform Deployment--------------------------SUCCESS"
+
--- /dev/null
+# edge node user name
+EDGENODEUSR=""
+
+# edge node ip
+EDGENODEIP=""
+
+# edge node password
+EDGENODEPASSWORD=""
+
+# master node user name
+MASTERNODEUSR=""
+
+# master node ip
+MASTERNODEIP=""
+
+# master node password
+MASTERNODEPASSWORD=""
+
+# eliot source code path including eliot folder
+PATH_OF_ELIOTFOLDER=""
+
+# home path of edge node
+HOME_EDGENODE=""
+
+# edgenode id for kubeedge configuration
+EDGENODEID=""
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+#Constants
+KUBEEDGE_SRC="$GOPATH/src/github.com/kubeedge/kubeedge"
+KUBEEDGE_BIN="$GOPATH/src/github.com/kubeedge/kubeedge/keadm"
+VERIFY_K8S="$PATH_OF_ELIOTFOLDER/blueprints/uCPE/scripts/verifyk8s.sh"
+
+# Silence command tracing (enabled by -x in the shebang) while the argument
+# check below runs.
+{ set +x; } > /dev/null 2>&1
+
+if [ -n "$1" ]; then
+
+if [ "$1" != "--help" ]; then
+  echo ""
+  echo "Usage of the command is wrong.. Please type ./kubeedge_setup.sh --help for more details"
+  echo ""
+  exit 0
+fi
+
+fi
+
+if [ "$1" == "--help" ]; then
+  echo ""
+  echo "This script will setup the kubeedge installation on Eliot master and Eliot edge"
+  echo "Before Executing this, add Eliot master and Eliot edge details in config_kubeedge file"
+  echo ""
+  # NOTE(review): 'set -x' after 'exit 0' is dead code -- confirm intent.
+  exit 0; set -x;
+fi
+
+# take_keedge will download the source code of kubeedge in master and in edge
+
+take_keedge(){
+
+  # Clone the kubeedge sources into $GOPATH and build the keadm tool.
+  source ~/.profile
+  git clone https://github.com/kubeedge/kubeedge.git \
+  $KUBEEDGE_SRC
+  cd $KUBEEDGE_BIN
+  make
+}
+
+source config_kubeedge > /dev/null 2>&1
+
+# Command string executed remotely on the edge node: fetch and build
+# kubeedge there as well.
+common_steps="echo $GOPATH && \
+git clone https://github.com/kubeedge/kubeedge.git $KUBEEDGE_SRC && \
+source ~/.profile && \
+cd $GOPATH/src && \
+sudo chmod -R 777 github.com && \
+cd $KUBEEDGE_BIN && \
+make"
+
+# Remote command that joins the edge node to the master via keadm.
+edge_start="cd $KUBEEDGE_BIN && \
+sudo chmod +x keadm && \
+sudo ./keadm join --edgecontrollerip=$MASTERNODEIP --edgenodeid=$EDGENODEID \
+--k8sserverip=$MASTERNODEIP:8080"
+
+# Initialisation of ELIOT master with kubeedge
+
+execute_keedge_controller(){
+  cd $KUBEEDGE_BIN
+  sudo chmod +x keadm
+  sudo ./keadm init
+}
+
+# Initialisation of Eliot edge with kubeedge
+
+exec_edge(){
+
+  cd $PATH_OF_ELIOTFOLDER/blueprints/uCPE/scripts/src
+
+  # Copy the shared configuration to the edge node and source it there.
+  sshpass -p ${EDGENODEPASSWORD} \
+  scp $PATH_OF_ELIOTFOLDER/blueprints/uCPE/scripts/src/config_kubeedge \
+  ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+  sshpass -p ${EDGENODEPASSWORD} ssh ${EDGENODEUSR}@${EDGENODEIP} \
+  source config_kubeedge
+
+  source config_kubeedge > /dev/null 2>&1
+  sshpass -p ${EDGENODEPASSWORD} \
+  ssh ${EDGENODEUSR}@${EDGENODEIP} ${common_steps}
+
+  echo "After cloning the code in ELIOT edge node"
+  # Ship the certificates generated by 'keadm init' to the edge node and
+  # unpack them where edge_core expects them.
+  sshpass -p ${EDGENODEPASSWORD} \
+  scp /etc/kubeedge/certs.tgz ${EDGENODEUSR}@${EDGENODEIP}:$HOME_EDGENODE
+
+  sshpass -p ${EDGENODEPASSWORD} \
+  ssh ${EDGENODEUSR}@${EDGENODEIP} \
+  sudo tar -xvzf $HOME/certs.tgz --directory /etc/kubeedge
+
+  sshpass -p ${EDGENODEPASSWORD} \
+  ssh ${EDGENODEUSR}@${EDGENODEIP} ${edge_start}
+}
+
+# start
+
+source config_kubeedge > /dev/null 2>&1
+
+take_keedge
+
+execute_keedge_controller
+
+exec_edge > /dev/null 2>&1
+
+sleep 10
+sudo kubectl get nodes
+
+# Make kubectl usable: root exports the admin kubeconfig via .profile,
+# non-root copies it into ~/.kube/config.
+if [ "$(id -u)" = 0 ]; then
+  echo "export KUBECONFIG=/etc/kubernetes/admin.conf" | \
+tee -a "${HOME}/.profile"
+  source "${HOME}/.profile"
+else
+  mkdir -p "${HOME}/.kube"
+  sudo cp -i /etc/kubernetes/admin.conf "${HOME}/.kube/config"
+  sudo chown "$(id -u)":"$(id -g)" "${HOME}/.kube/config"
+fi
+
+chmod +x $VERIFY_K8S
+source $VERIFY_K8S
--- /dev/null
+#!/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# Deploys a single-replica nginx and waits for it to reach Running, to
+# verify the cluster can schedule pods.
+
+NGINXDEP=~/testk8s-nginx.yaml
+
+cat <<EOF > "${NGINXDEP}"
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+      - name: nginx
+        image: nginx:1.15.12
+        ports:
+        - containerPort: 80
+          hostPort: 80
+EOF
+
+#check if nginx is already deployed
+if ! kubectl get pods | grep nginx; then
+  kubectl create -f ~/testk8s-nginx.yaml
+fi
+
+#To check whether the deployment is successful
+retry=10
+while [ $retry -gt 0 ]
+do
+  if [ 2 == "$(kubectl get pods | grep -c -e STATUS -e Running)" ]; then
+    break
+  fi
+  # POSIX-safe decrement: the original '((retry-=1))' returns status 1
+  # when the result is 0, which aborts the script under 'set -e' before
+  # the final status check below could run.
+  retry=$((retry-1))
+  sleep 10
+done
+[ $retry -gt 0 ] || exit 1
--- /dev/null
+# !/bin/bash -ex
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# Best-effort cleanup of the kubeadm cluster on the manager and on every
+# worker node listed in 'nodelist'.
+# NOTE(review): the shebang above is broken ('# !' is just a comment), so
+# the script runs under the invoking shell without -e/-x; confirm intent
+# before changing it, since 'set -e' would abort this best-effort cleanup
+# on the first failing command.
+
+sudo kubeadm reset
+
+if [ -f "$HOME/testk8s-nginx.yaml" ]; then
+  # Fixed: the delete previously referenced 'test-k8snginx.yaml' (typo),
+  # so the nginx deployment created from testk8s-nginx.yaml was never
+  # actually removed.
+  cd $HOME && kubectl delete -f testk8s-nginx.yaml && rm -rf testk8s-nginx.yaml
+  echo "testk8s-nginx.yaml cleaned"
+fi
+
+if [ -d "/var/lib/etcd" ]; then
+  sudo rm -rf /var/lib/etcd
+  echo "etcd cleaned"
+fi
+
+KUBEADM_RESET="sudo kubeadm reset"
+ETCD_CLEAN="sudo rm -rf /var/lib/etcd"
+CLEANUP_PROM_CADVISOR="cd eliot/scripts/ci_management && ./uninstall_cadvisor.sh"
+
+# Read all the Worker Node details from nodelist file.
+echo $(pwd)
+while read line
+do
+  nodeinfo="${line}"
+  nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+  nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+  nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+  sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${KUBEADM_RESET}
+  sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${ETCD_CLEAN}
+  sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} ${CLEANUP_PROM_CADVISOR}
+done < nodelist > /dev/null 2>&1
+
--- /dev/null
+#!/usr/bin/expect -f
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others. #
+# #
+# All rights reserved. This program and the accompanying materials #
+# are made available under the terms of the Apache License, Version 2.0 #
+# which accompanies this distribution, and is available at #
+# http://www.apache.org/licenses/LICENSE-2.0 #
+##############################################################################
+
+# Drives cleanup_centos.sh non-interactively: answers "y" to both of its
+# confirmation prompts, then hands the session back to the user.
+spawn ./cleanup_centos.sh
+expect "Are you sure you want to proceed? "
+send "y\n"
+
+expect "Are you sure you want to proceed? "
+send "y\n"
+interact
--- /dev/null
+#!/bin/bash -ex
+
+##############################################################################
+# Copyright (c) 2019 Huawei Tech and others.
+#
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+
+# To verify edgex platform deployment on k8s.
+
+# Wait (up to 10 x 5s) for the edgex-config-seed job to show Completed.
+retrytimes=10
+while [ $retrytimes -gt 0 ]
+do
+  if [ 1 == "$(kubectl get pods | grep edgex-config-seed | grep -i completed | wc -l)" ]; then
+    break
+  fi
+  # POSIX-safe decrement: the original '((retrytimes-=1))' returns status 1
+  # when the result is 0, aborting the script under 'set -e' before the
+  # final status check could run.
+  retrytimes=$((retrytimes-1))
+  sleep 5
+done
+[ $retrytimes -gt 0 ] || exit 1
+
+# Reset the variable to check Running status of other edgex platform microservices
+
+# Wait (up to 20 x 5s) for all 12 edgex microservices to be Running.
+retrytimes=20
+while [ $retrytimes -gt 0 ]
+do
+  if [ 12 == "$(kubectl get pods | grep edgex | grep Running | wc -l)" ]; then
+    echo "Edgex Platform is successfully deployed on ELIOT !!!!"
+    break
+  fi
+  retrytimes=$((retrytimes-1))
+  sleep 5
+done
+[ $retrytimes -gt 0 ] || exit 1
+
# Rollbacking the changes on ELIOT Manager Node
rollback_k8smaster()
{
-sudo apt-get install iptables
-sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
-sudo apt-get install ipvsadm
-sudo fuser -k -n tcp 10250
-sudo yes y | apt-get purge -y docker-engine
-sudo yes y | apt-get purge -y docker
-sudo yes y | apt-get purge -y docker.io
-sudo yes y | apt-get purge -y docker-ce
-sudo yes y | apt-get purge -y docker-ce-cli
-sudo yes y | groupdel docker
-sudo yes y | kubeadm reset
-sudo yes y | apt-get purge kubeadm
-sudo yes y | apt-get purge kubectl
-sudo yes y | apt-get purge kubelet
-sudo yes y | apt-get purge kube*
-sudo yes y | apt-get purge kubernetes-cni
-sudo rm -rf ~/.kube
-sudo yes y | apt-get autoremove
-sudo yes y | apt-get autoclean
+if [ "$(id -u)" = 0 ]; then
+ sudo apt-get install iptables
+ sudo iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
+ sudo apt-get install ipvsadm
+ sudo fuser -k -n tcp 10250
+ sudo yes y | apt-get purge -y docker-engine
+ sudo yes y | apt-get purge -y docker
+ sudo yes y | apt-get purge -y docker.io
+ sudo yes y | apt-get purge -y docker-ce
+ sudo yes y | apt-get purge -y docker-ce-cli
+ sudo yes y | groupdel docker
+ sudo yes y | kubeadm reset
+ sudo yes y | apt-get purge kubeadm
+ sudo yes y | apt-get purge kubectl
+ sudo yes y | apt-get purge kubelet
+ sudo yes y | apt-get purge kube*
+ sudo yes y | apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+ sudo yes y | apt-get autoremove
+ sudo yes y | apt-get autoclean
+else
+ sudo fuser -k -n tcp 10250
+ sudo yes y | sudo apt-get purge -y docker-engine
+ sudo yes y | sudo apt-get purge -y docker
+ sudo yes y | sudo apt-get purge -y docker.io
+ sudo yes y | sudo apt-get purge -y docker-ce
+ sudo yes y | sudo apt-get purge -y docker-ce-cli
+ sudo yes y | sudo kubeadm reset
+ sudo yes y | sudo apt-get purge kubeadm
+ sudo yes y | sudo apt-get purge kubectl
+ sudo yes y | sudo apt-get purge kubelet
+ sudo yes y | sudo apt-get purge kube*
+ sudo yes y | sudo apt-get purge kubernetes-cni
+ sudo rm -rf ~/.kube
+fi
rollback_k8sworkers
#Rollbacking the changes on ELIOT Worker Node
rollback_k8sworkers()
{
-INSTALL_IPVSADM="sudo apt-get install ipvsadm"
-RESET_PORT="fuser -k -n tcp 10250"
-#REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
-REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
-REMOVE_DOCKER1="sudo yes y | apt-get purge -y docker-engine"
-REMOVE_DOCKER2="sudo yes y | apt-get purge -y docker"
-REMOVE_DOCKER3="sudo yes y | apt-get purge -y docker.io"
-REMOVE_DOCKER4="sudo yes y | apt-get purge -y docker-ce"
-REMOVE_DOCKER5="sudo yes y | apt-get purge -y docker-ce-cli"
-REMOVE_DOCKER6="sudo yes y | groupdel docker"
-RESET_KUBEADM="sudo yes y | kubeadm reset"
-REMOVE_KUBE_FILES1="sudo yes y | apt-get purge kubeadm"
-REMOVE_KUBE_FILES2="sudo yes y | apt-get purge kubectl "
-REMOVE_KUBE_FILES3="sudo yes y | apt-get purge kubelet "
-REMOVE_KUBE_FILES4="sudo yes y | apt-get purge kube* "
-REMOVE_KUBE_FILES5="sudo yes y | apt-get purge kubernetes-cni"
-REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
-AUTO_REMOVE="sudo yes y | apt-get autoremove"
-AUTO_CLEAN="sudo yes y | apt-get autoclean"
-
+if [ "$(id -u)" = 0 ]; then
+ INSTALL_IPVSADM="sudo apt-get install ipvsadm"
+ RESET_PORT="fuser -k -n tcp 10250"
+ #REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf !('manifests') "
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | groupdel docker"
+ RESET_KUBEADM="sudo yes y | kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | apt-get purge kubernetes-cni"
+ REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+ AUTO_REMOVE="sudo yes y | apt-get autoremove"
+ AUTO_CLEAN="sudo yes y | apt-get autoclean"
+else
+ RESET_PORT="fuser -k -n tcp 10250"
+ REMOVE_KUBE_FILES="cd /etc/kubernetes && sudo rm -rf bootstrap-kubelet.conf kubelet.conf pki"
+ REMOVE_DOCKER1="sudo yes y | sudo apt-get purge -y docker-engine"
+ REMOVE_DOCKER2="sudo yes y | sudo apt-get purge -y docker"
+ REMOVE_DOCKER3="sudo yes y | sudo apt-get purge -y docker.io"
+ REMOVE_DOCKER4="sudo yes y | sudo apt-get purge -y docker-ce"
+ REMOVE_DOCKER5="sudo yes y | sudo apt-get purge -y docker-ce-cli"
+ REMOVE_DOCKER6="sudo yes y | sudo groupdel docker"
+ RESET_KUBEADM="sudo yes y | sudo kubeadm reset"
+ REMOVE_KUBE_FILES1="sudo yes y | sudo apt-get purge kubeadm"
+ REMOVE_KUBE_FILES2="sudo yes y | sudo apt-get purge kubectl "
+ REMOVE_KUBE_FILES3="sudo yes y | sudo apt-get purge kubelet "
+ REMOVE_KUBE_FILES4="sudo yes y | sudo apt-get purge kube* "
+ REMOVE_KUBE_FILES5="sudo yes y | sudo apt-get purge kubernetes-cni"
+ REMOVE_KUBE_FILES6="sudo rm -rf ~/.kube"
+fi
#Read all the Worker Node details from nodelist file.
while read line
--- /dev/null
+FROM centos:centos7
+ADD ./work/ /root/work
+RUN yum install -y gcc git
+RUN yum groupinstall -y 'Development Tools'
+RUN cd /root/work && ls -al && tar xzf cmake-3.15.2.tar.gz && \
+ rpm -Uvh scons-2.3.0-1.el7.centos.noarch.rpm && \
+ python get-pip.py && \
+ cd cmake-3.15.2 && ./bootstrap && \
+ make && \
+ make install && \
+ cd ../protocol-opcua-c/ && \
+ ./build.sh
--- /dev/null
+#!/bin/bash
+set -o errexit
+
+# set the docker name and docker tag when you build
+# DOCKER_NAME=
+# DOCKER_TAG=
+
+export ELIOT_DIR=$(cd $(dirname $0); pwd)
+export WORK_DIR=$ELIOT_DIR/work
+export CMAKE_URL=https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2.tar.gz
+export SCONS_PPA_URL=http://repo.okay.com.mx/centos/7/x86_64/release//scons-2.3.0-1.el7.centos.noarch.rpm
+export GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
+export OPCUA_REPO=https://github.com/edgexfoundry-holding/protocol-opcua-c.git
+export DOCKER_NAME=${DOCKER_NAME:-"eliot/opc-ua"}
+export DOCKER_TAG=${DOCKER_TAG:-"latest"}
+
+
+rm -rf $WORK_DIR
+mkdir -p $WORK_DIR
+
+cd $WORK_DIR
+wget $CMAKE_URL
+wget $SCONS_PPA_URL
+wget $GET_PIP_URL
+git clone $OPCUA_REPO
+
+cd $ELIOT_DIR
+docker build ./ -t $DOCKER_NAME:$DOCKER_TAG
--- /dev/null
+#!/bin/bash
+set -o errexit
+
+# set the docker name and docker tag when you build
+# export DOCKER_NAME=eliot/opc-ua
+# export DOCKER_TAG=latest
+
+export ELIOT_DIR=$(cd $(dirname $0); pwd)
+export WORK_DIR=$ELIOT_DIR/work
+export CMAKE_URL=https://github.com/Kitware/CMake/releases/download/v3.15.2/cmake-3.15.2.tar.gz
+export SCONS_PPA_URL=http://repo.okay.com.mx/centos/7/x86_64/release//scons-2.3.0-1.el7.centos.noarch.rpm
+export GET_PIP_URL=https://bootstrap.pypa.io/get-pip.py
+export OPCUA_REPO=https://github.com/edgexfoundry-holding/protocol-opcua-c.git
+export DOCKER_NAME=${DOCKER_NAME:-"eliot/opc-ua"}
+export DOCKER_TAG=${DOCKER_TAG:-"latest"}
+
+# Clean and Create the work directory
+rm -rf $WORK_DIR
+mkdir -p $WORK_DIR
+
+yum install -y gcc git wget
+yum groupinstall -y 'Development Tools'
+# Get the package and source code
+cd $WORK_DIR
+wget $CMAKE_URL
+wget $SCONS_PPA_URL
+wget $GET_PIP_URL
+git clone $OPCUA_REPO
+
+# Install Package
+rpm -Uvh scons-2.3.0-1.el7.centos.noarch.rpm
+python get-pip.py
+
+# Build and Install camke
+tar xzf cmake-3.15.2.tar.gz
+cd ${WORK_DIR}/cmake-3.15.2
+./bootstrap
+make
+make install
+
+# Build the opc-ua server and client
+cd ${WORK_DIR}/protocol-opcua-c/
+./build.sh
+
+set +x
+echo "####################################################"
+echo "# If you want to start the server, follow below steps"
+echo "# cd ${WORK_DIR}/protocol-opcua-c/example/out"
+echo "# ./server"
+echo "####################################################"
+
source common.sh | tee eliotcommon.log
source k8smaster.sh | tee kubeadm.log
# Setup ELIOT Node
- setup_k8sworkers
+ oscheck_edge
}
setup_k8sworkers()
source k8smaster_centos.sh | tee kubeadm_centos.log
# Setup ELIOT Node
- setup_k8sworkers_centos
-
- kubectl apply -f cni/calico/rbac.yaml
- kubectl apply -f cni/calico/calico.yaml
-
+ oscheck_edge
}
}
+# ELIOT edgenode os check and setup edge node
+oscheck_edge()
+{
+ while read line
+ do
+ nodeinfo="${line}"
+ nodeusr=$(echo ${nodeinfo} | cut -d"|" -f1)
+ nodeip=$(echo ${nodeinfo} | cut -d"|" -f2)
+ nodepaswd=$(echo ${nodeinfo} | cut -d"|" -f3)
+    if sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} grep -q Ubuntu /etc/os-release; then
+ setup_k8sworkers
+    elif sshpass -p ${nodepaswd} ssh ${nodeusr}@${nodeip} grep -q CentOS /etc/os-release; then
+ setup_k8sworkers_centos
+ kubectl apply -f cni/calico/rbac.yaml
+ kubectl apply -f cni/calico/calico.yaml
+ fi
+ done < nodelist > /dev/null 2>&1
+}
+
# verify kubernetes setup by deploying nginx server.
verify_k8s_status(){