--- /dev/null
+[submodule "seba_charts"]
+ path = src/use_cases/seba_on_arm/src_repo/seba_charts
+ url = https://github.com/iecedge/seba_charts.git
+[submodule "kafka-exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/kafka-exporter
+ url = https://github.com/iecedge/kafka_exporter.git
+[submodule "cp-docker-images"]
+ path = src/use_cases/seba_on_arm/src_repo/cp-docker-images
+ url = https://github.com/iecedge/cp-docker-images.git
+ branch = 4.1.2-post-arm64v8
+[submodule "kafka-prometheus-jmx-exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/kafka-prometheus-jmx-exporter
+ url = https://github.com/iecedge/dockerfiles.git
+ branch = misc-dockerfiles
+[submodule "contrib"]
+ path = src/use_cases/seba_on_arm/src_repo/contrib
+ url = https://github.com/iecedge/contrib.git
+[submodule "zookeeper_exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/zookeeper_exporter
+ url = https://github.com/iecedge/zookeeper_exporter.git
+ branch = v1.1.2-arm64v8
+[submodule "jmx-prometheus-exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/jmx-prometheus-exporter
+ url = https://github.com/iecedge/docker-jmx-prometheus-exporter.git
+[submodule "xos"]
+ path = src/use_cases/seba_on_arm/src_repo/xos
+ url = https://github.com/iecedge/xos.git
+ branch = 2.1.25
+[submodule "xos-tosca"]
+ path = src/use_cases/seba_on_arm/src_repo/xos-tosca
+ url = https://github.com/iecedge/xos-tosca.git
+ branch = 1.1.6
+[submodule "cord-tester"]
+ path = src/use_cases/seba_on_arm/src_repo/cord-tester
+ url = https://github.com/iecedge/cord-tester.git
+[submodule "xos-gui"]
+ path = src/use_cases/seba_on_arm/src_repo/xos-gui
+ url = https://github.com/iecedge/xos-gui.git
+[submodule "xos-ws"]
+ path = src/use_cases/seba_on_arm/src_repo/xos-ws
+ url = https://github.com/iecedge/xos-rest-gw.git
+[submodule "tosca-loader"]
+ path = src/use_cases/seba_on_arm/src_repo/tosca-loader
+ url = https://github.com/iecedge/xos-tosca.git
+ branch = 1.1.5
+[submodule "kubernetes-synchronizer"]
+ path = src/use_cases/seba_on_arm/src_repo/kubernetes-synchronizer
+ url = https://github.com/iecedge/cord-kubernetes-service.git
+ branch = 2.1.25
+[submodule "sadis-server"]
+ path = src/use_cases/seba_on_arm/src_repo/sadis-server
+ url = https://github.com/iecedge/sadis-server.git
+[submodule "rcord-synchronizer"]
+ path = src/use_cases/seba_on_arm/src_repo/rcord-synchronizer
+ url = https://github.com/iecedge/rcord.git
+[submodule "fabric"]
+ path = src/use_cases/seba_on_arm/src_repo/fabric
+ url = https://github.com/iecedge/fabric.git
+[submodule "fabric-crossconnect"]
+ path = src/use_cases/seba_on_arm/src_repo/fabric-crossconnect
+ url = https://github.com/iecedge/fabric-crossconnect.git
+ branch = 1.1.4
+[submodule "onos-service"]
+ path = src/use_cases/seba_on_arm/src_repo/onos-service
+ url = https://github.com/iecedge/onos-service.git
+ branch = 2.0.7
+[submodule "olt-service"]
+ path = src/use_cases/seba_on_arm/src_repo/olt-service
+ url = https://github.com/iecedge/olt-service.git
+ branch = 2.1.14
+[submodule "elasticsearch-docker"]
+ path = src/use_cases/seba_on_arm/src_repo/elasticsearch-docker
+ url = https://github.com/iecedge/elasticsearch-docker.git
+ branch = 6.4.2-arm64v8
+[submodule "kubernetes"]
+ path = src/use_cases/seba_on_arm/src_repo/kubernetes
+ url = https://github.com/iecedge/kubernetes.git
+ branch = fluentd-elasticsearch-v2.3.1-arm64v8
+[submodule "kibana-docker"]
+ path = src/use_cases/seba_on_arm/src_repo/kibana-docker
+ url = https://github.com/iecedge/kibana-docker.git
+ branch = 6.4.2-arm64v8
+[submodule "logstash-docker"]
+ path = src/use_cases/seba_on_arm/src_repo/logstash-docker
+ url = https://github.com/iecedge/logstash-docker.git
+[submodule "logstash_exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/logstash_exporter
+ url = https://github.com/iecedge/logstash_exporter.git
+[submodule "kafka-topic-exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/kafka-topic-exporter
+ url = https://github.com/iecedge/kafka-topic-exporter.git
+[submodule "docker-curl"]
+ path = src/use_cases/seba_on_arm/src_repo/docker-curl
+ url = https://github.com/iecedge/docker-curl.git
+[submodule "k8s-sidecar"]
+ path = src/use_cases/seba_on_arm/src_repo/k8s-sidecar
+ url = https://github.com/iecedge/k8s-sidecar.git
+[submodule "alertmanager"]
+ path = src/use_cases/seba_on_arm/src_repo/alertmanager
+ url = https://github.com/iecedge/alertmanager.git
+ branch = v0.15.0-arm64v8
+[submodule "kube-state-metrics"]
+ path = src/use_cases/seba_on_arm/src_repo/kube-state-metrics
+ url = https://github.com/iecedge/kube-state-metrics.git
+[submodule "node_exporter"]
+ path = src/use_cases/seba_on_arm/src_repo/node_exporter
+ url = https://github.com/iecedge/node_exporter.git
+ branch = release-0.16-arm64v8
+[submodule "prometheus"]
+ path = src/use_cases/seba_on_arm/src_repo/prometheus
+ url = https://github.com/iecedge/prometheus.git
+ branch = v2.3.1-arm64v8
+[submodule "pushgateway"]
+ path = src/use_cases/seba_on_arm/src_repo/pushgateway
+ url = https://github.com/iecedge/pushgateway.git
+ branch = v0.5.2-arm64v8
+[submodule "freeradius"]
+ path = src/use_cases/seba_on_arm/src_repo/freeradius
+ url = https://github.com/iecedge/freeradius.git
+[submodule "voltha"]
+ path = src/use_cases/seba_on_arm/src_repo/voltha
+ url = https://github.com/iecedge/voltha.git
+ branch = jglr_move_to_cachengo_infra
+[submodule "etcd-operator-docker"]
+ path = src/use_cases/seba_on_arm/src_repo/etcd-operator-docker
+ url = https://github.com/iecedge/etcd-operator-docker.git
+[submodule "etcd"]
+ path = src/use_cases/seba_on_arm/src_repo/etcd
+ url = https://github.com/iecedge/etcd.git
+[submodule "onos"]
+ path = src/use_cases/seba_on_arm/src_repo/onos
+ url = https://github.com/iecedge/onos.git
+ branch = v1.13.5
+[submodule "beats-docker"]
+ path = src/use_cases/seba_on_arm/src_repo/beats-docker
+ url = https://github.com/iecedge/beats-docker.git
+ branch = 6.4.2-arm64v8
--- /dev/null
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ubuntu1604-dev-48c-256g-1 installer descriptor file ###
+
+idf:
+ version: 0.1
+ net_config:
+ oob:
+ interface: 0
+ ip-range: ~
+ vlan: native
+ mask: 24
+ # All networks (except OOB) are virtual networks managed by `libvirt`
+ # Interface indexes are based on Fuel installer defaults
+ admin:
+ interface: 0 # when used, should be first vnet interface, untagged
+ vlan: native
+ network: 192.168.11.0
+ mask: 24
+ mgmt:
+ interface: 1 # when used, should be second vnet interface, untagged
+ vlan: native
+ network: 172.16.10.0
+ ip-range: 172.16.10.10-172.16.10.254 # Some IPs are in use by lab infra
+ mask: 24
+ storage:
+ interface: 4
+ vlan: native
+ network: 192.168.20.0
+ mask: 24
+ private:
+ interface: 2
+ vlan: native
+ network: 10.1.0.0
+ mask: 24
+ public:
+ interface: 3
+ vlan: native
+ network: 10.0.19.0
+ ip-range: 10.0.19.120-10.0.19.129
+ mask: 24
+ gateway: 10.0.19.254
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ fuel:
+ jumphost:
+ bridges:
+ admin: ~
+ mgmt: ~
+ private: ~
+ public: 'public'
+ network:
+ interface_mtu: 9000
+ ntp_strata_host1: 1.se.pool.ntp.org
+ ntp_strata_host2: 0.se.pool.ntp.org
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'enp1s0'
+ - 'enp2s0'
+ - 'enp3s0'
+ - 'enp4s0'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:01:00.0'
+ - '0000:02:00.0'
+ - '0000:03:00.0'
+ - '0000:04:00.0'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ubuntu1604-dev-48c-256g-2 installer descriptor file ###
+
+idf:
+ version: 0.1
+ net_config:
+ oob:
+ interface: 0
+ ip-range: ~
+ vlan: native
+ mask: 24
+ # All networks (except OOB) are virtual networks managed by `libvirt`
+ # Interface indexes are based on Fuel installer defaults
+ admin:
+ interface: 0 # when used, should be first vnet interface, untagged
+ vlan: native
+ network: 192.168.11.0
+ mask: 24
+ mgmt:
+ interface: 1 # when used, should be second vnet interface, untagged
+ vlan: native
+ network: 172.16.10.0
+ ip-range: 172.16.10.10-172.16.10.254 # Some IPs are in use by lab infra
+ mask: 24
+ storage:
+ interface: 4
+ vlan: native
+ network: 192.168.20.0
+ mask: 24
+ private:
+ interface: 2
+ vlan: native
+ network: 10.1.0.0
+ mask: 24
+ public:
+ interface: 3
+ vlan: native
+ network: 10.0.19.0
+ ip-range: 10.0.19.130-10.0.19.139
+ mask: 24
+ gateway: 10.0.19.254
+ dns:
+ - 8.8.8.8
+ - 8.8.4.4
+ fuel:
+ jumphost:
+ bridges:
+ admin: ~
+ mgmt: ~
+ private: ~
+ public: 'public'
+ network:
+ interface_mtu: 9000
+ ntp_strata_host1: 1.se.pool.ntp.org
+ ntp_strata_host2: 0.se.pool.ntp.org
+ node:
+ # Ordered-list, index should be in sync with node index in PDF
+ - interfaces: &interfaces
+ # Ordered-list, index should be in sync with interface index in PDF
+ - 'enp1s0'
+ - 'enp2s0'
+ - 'enp3s0'
+ - 'enp4s0'
+ busaddr: &busaddr
+ # Bus-info reported by `ethtool -i ethX`
+ - '0000:01:00.0'
+ - '0000:02:00.0'
+ - '0000:03:00.0'
+ - '0000:04:00.0'
+ - interfaces: *interfaces
+ busaddr: *busaddr
+ - interfaces: *interfaces
+ busaddr: *busaddr
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ubuntu1604-dev-48c-256g-1 pod descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: ENEA AB
+ contact: armband@enea.com
+ lab: ENEA lab
+ location: Stockholm, Sweden
+ type: production
+ link: https://wiki.akraino.org/display/AK/IEC+Internal+Verification+and+Validation+Lab+Setup
+##############################################################################
+jumphost:
+ name: ubuntu1604-dev-48c-256g-1
+ node:
+ type: baremetal
+ vendor: Cavium
+ model: ThunderX
+ arch: aarch64
+ cpus: 1
+ cpu_cflags: fp asimd evtstrm aes pmull sha1 sha2 crc32
+ cores: 48
+ memory: 128G
+ disks:
+ - name: disk1
+ disk_capacity: 500G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ - name: disk2
+ disk_capacity: 2T
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation: 7200
+ os: ubuntu-16.04
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: ADMIN
+ pass: ADMIN
+ remote_management:
+ <<: *remote_params
+ address: 10.0.19.6
+ mac_address: "00:00:00:00:00:00"
+ interfaces:
+ # Connected, used only for admin/PXE, enP5p144s0
+ - mac_address: "00:00:00:00:00:00"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ # Connected, used for public VLAN, enP2p1s0f1
+ - mac_address: "00:00:00:00:00:00"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+##############################################################################
+nodes:
+ - name: node-1
+ node: &nodeparams
+ # Fuel overrides certain params (e.g. cpus, mem) based on node role later
+ type: virtual
+ vendor: libvirt
+ model: virt
+ arch: aarch64
+ cpus: 1
+ cpu_cflags: fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+ cores: 8
+ memory: 6G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 100G
+ disk_type: hdd
+ disk_interface: scsi # virtio-scsi
+ disk_rotation: 7200
+ remote_management: &remotemgmt
+ type: libvirt
+ user: changeme
+ pass: changeme
+ address: 'qemu:///system'
+ interfaces: &interfaces
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00" # MACs will be assigned by libvirt
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ ############################################################################
+ - name: node-2
+ node: *nodeparams
+ disks: *disks
+ remote_management: *remotemgmt
+ interfaces: *interfaces
+ ############################################################################
+ - name: node-3
+ node: *nodeparams
+ disks: *disks
+ remote_management: *remotemgmt
+ interfaces: *interfaces
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Enea AB and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+### ubuntu1604-dev-48c-256g-2 pod descriptor file ###
+
+version: 1.0
+details:
+ pod_owner: ENEA AB
+ contact: armband@enea.com
+ lab: ENEA lab
+ location: Stockholm, Sweden
+ type: production
+ link: https://wiki.akraino.org/display/AK/IEC+Internal+Verification+and+Validation+Lab+Setup
+##############################################################################
+jumphost:
+ name: ubuntu1604-dev-48c-256g-2
+ node:
+ type: baremetal
+ vendor: Cavium
+ model: ThunderX
+ arch: aarch64
+ cpus: 1
+ cpu_cflags: fp asimd evtstrm aes pmull sha1 sha2 crc32
+ cores: 48
+ memory: 128G
+ disks:
+ - name: disk1
+ disk_capacity: 500G
+ disk_type: ssd
+ disk_interface: sata
+ disk_rotation: 0
+ - name: disk2
+ disk_capacity: 2T
+ disk_type: hdd
+ disk_interface: sata
+ disk_rotation: 7200
+ os: ubuntu-16.04
+ remote_params: &remote_params
+ type: ipmi
+ versions:
+ - 2.0
+ user: ADMIN
+ pass: ADMIN
+ remote_management:
+ <<: *remote_params
+ address: 10.0.19.6
+ mac_address: "00:00:00:00:00:00"
+ interfaces:
+ # Connected, used only for admin/PXE, enP5p144s0
+ - mac_address: "00:00:00:00:00:00"
+ speed: 1gb
+ features: 'dpdk|sriov'
+ name: 'nic1'
+ # Connected, used for public VLAN, enP2p1s0f1
+ - mac_address: "00:00:00:00:00:00"
+ speed: 10gb
+ features: 'dpdk|sriov'
+ name: 'nic2'
+##############################################################################
+nodes:
+ - name: node-1
+ node: &nodeparams
+ # Fuel overrides certain params (e.g. cpus, mem) based on node role later
+ type: virtual
+ vendor: libvirt
+ model: virt
+ arch: aarch64
+ cpus: 1
+ cpu_cflags: fp asimd evtstrm aes pmull sha1 sha2 crc32 cpuid
+ cores: 8
+ memory: 6G
+ disks: &disks
+ - name: 'disk1'
+ disk_capacity: 100G
+ disk_type: hdd
+ disk_interface: scsi # virtio-scsi
+ disk_rotation: 7200
+ remote_management: &remotemgmt
+ type: libvirt
+ user: changeme
+ pass: changeme
+ address: 'qemu:///system'
+ interfaces: &interfaces
+ - name: 'nic1'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00" # MACs will be assigned by libvirt
+ vlan: native
+ - name: 'nic2'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ - name: 'nic3'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ - name: 'nic4'
+ speed: 10gb
+ features: 'dpdk|sriov'
+ mac_address: "00:00:00:00:00:00"
+ vlan: native
+ ############################################################################
+ - name: node-2
+ node: *nodeparams
+ disks: *disks
+ remote_management: *remotemgmt
+ interfaces: *interfaces
+ ############################################################################
+ - name: node-3
+ node: *nodeparams
+ disks: *disks
+ remote_management: *remotemgmt
+ interfaces: *interfaces
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+from docs_conf.conf import * # noqa: F401,F403
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+---
+project_cfg: akraino
+project: iec
--- /dev/null
+.. _iec:
+
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Akraino Project, Inc. and its contributors
+
+=====================
+Integrated Edge Cloud
+=====================
+
+.. toctree::
+ :numbered:
+ :maxdepth: 2
+
+ release/release-notes/index
+ release/installation/index
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Akraino Project, Inc. and its contributors
+
+.. _iec-installation:
+
+.. toctree::
+ :maxdepth: 2
+
+ installation.instruction.rst
+
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Akraino Project, Inc. and its contributors
+
+************************************
+Akraino IEC Installation Instruction
+************************************
+
+
+Introduction
+============
+
+License
+=======
+
+How to use this document
+========================
+
+Deployment Architecture
+=======================
+
+Pre-Installation Requirements
+=============================
+
+Hardware Requirements
+---------------------
+
+Minimum Hardware Requirements
+`````````````````````````````
+
+Recommended Hardware Requirements
+`````````````````````````````````
+
+Software Prerequisites
+----------------------
+
+Database Prerequisites
+----------------------
+
+Schema scripts
+``````````````
+
+Other Installation Requirements
+-------------------------------
+
+Jump Host Requirements
+``````````````````````
+
+Network Requirements
+````````````````````
+
+Bare Metal Node Requirements
+````````````````````````````
+
+Execution Requirements (Bare Metal Only)
+````````````````````````````````````````
+
+Installation High-Level Overview
+================================
+
+Bare Metal Deployment Guide
+---------------------------
+
+Install Bare Metal Jump Host
+````````````````````````````
+
+Creating a Node Inventory File
+``````````````````````````````
+
+Creating the Settings Files
+```````````````````````````
+
+Running
+```````
+
+Virtual Deployment Guide
+------------------------
+
+Standard Deployment Overview
+````````````````````````````
+
+Snapshot Deployment Overview
+````````````````````````````
+
+Special Requirements for Virtual Deployments
+````````````````````````````````````````````
+
+Install Jump Host
+'''''''''''''''''
+
+Verifying the Setup - VMs
+'''''''''''''''''''''''''
+
+Upstream Deployment Guide
+-------------------------
+
+Upstream Deployment Key Features
+````````````````````````````````
+
+Special Requirements for Upstream Deployments
+`````````````````````````````````````````````
+
+Scenarios and Deploy Settings for Upstream Deployments
+``````````````````````````````````````````````````````
+
+Including Upstream Patches with Deployment
+``````````````````````````````````````````
+
+Running
+```````
+
+Interacting with Containerized Overcloud
+````````````````````````````````````````
+
+Verifying the Setup
+===================
+
+OpenStack Verification
+======================
+
+Developer Guide and Troubleshooting
+===================================
+
+Utilization of Images
+---------------------
+
+Post-deployment Configuration
+-----------------------------
+
+OpenDaylight Integration
+------------------------
+
+Debugging Failures
+------------------
+
+Reporting a Bug
+---------------
+
+Uninstall Guide
+===============
+
+Troubleshooting
+===============
+
+Error Message Guide
+-------------------
+
+Maintenance
+===========
+
+Frequently Asked Questions
+==========================
+
+License
+=======
+
+References
+==========
+
+Definitions, acronyms and abbreviations
+=======================================
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Akraino Project, Inc. and its contributors
+
+.. _iec-releasenotes:
+
+.. toctree::
+ :maxdepth: 2
+
+ release-notes.rst
--- /dev/null
+.. This work is licensed under a Creative Commons Attribution 4.0 International License.
+.. http://creativecommons.org/licenses/by/4.0
+.. (c) Akraino Project, Inc. and its contributors
+
+********************************************
+Akraino Release Notes for the IEC Blue Print
+********************************************
+
+Summary
+=======
+
+Release Data
+============
+
+Version change
+--------------
+
+Module version changes
+``````````````````````
+
+Document Version Changes
+````````````````````````
+
+Deliverable
+-----------
+
+Software Deliverable
+````````````````````
+
+Documentation Deliverable
+`````````````````````````
+
+Fixed Issues and Bugs
+`````````````````````
+
+Known Limitations, Issues and Workarounds
+=========================================
+
+System Limitations
+------------------
+
+Known Issues
+------------
+
+Workarounds
+-----------
+
+References
+==========
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+lfdocs-conf
+# Uncomment the following line if your project uses Sphinx to document
+# HTTP APIs
+# sphinxcontrib-httpdomain
+++ /dev/null
-#!/bin/bash -ex
-
-CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
-CALICO_URI_ROOT=https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation
-
-# Install the Etcd Database
-if [ "$(uname -m)" == 'aarch64' ]; then
- ETCD_YAML=https://raw.githubusercontent.com/Jingzhao123/arm64TemporaryCalico/temporay_arm64/v3.3/getting-started/kubernetes/installation/hosted/etcd-arm64.yaml
-else
- ETCD_YAML=${CALICO_URI_ROOT}/hosted/etcd.yaml
-fi
-wget -O etcd.yaml "${ETCD_YAML}"
-sed -i "s/10.96.232.136/${CLUSTER_IP}/" etcd.yaml
-kubectl apply -f etcd.yaml
-
-# Install the RBAC Roles required for Calico
-kubectl apply -f "${CALICO_URI_ROOT}/rbac.yaml"
-
-# Install Calico to system
-wget -O calico.yaml "${CALICO_URI_ROOT}/hosted/calico.yaml"
-sed -i "s/10.96.232.136/${CLUSTER_IP}/" calico.yaml
-if [ "$(uname -m)" == 'aarch64' ]; then
- sed -i "s/quay.io\/calico/calico/" calico.yaml
-fi
-# FIXME: IP_AUTODETECTION_METHOD?
-kubectl apply -f calico.yaml
-
-# Remove the taints on master node
-kubectl taint nodes --all node-role.kubernetes.io/master- || true
+++ /dev/null
-#!/bin/bash -ex
-
-VERSION='v2.12.3'
-if [ "$(uname -m)" == 'aarch64' ]; then
- FLAVOR='linux-arm64'
-else
- FLAVOR='linux-amd64'
-fi
-
-URI_ROOT='https://storage.googleapis.com/kubernetes-helm'
-TGZ_NAME="helm-${VERSION}-${FLAVOR}.tar.gz"
-
-if [ ! -e /usr/bin/helm ] || [ ! -e /usr/bin/tiller ]; then
- wget -O "/tmp/${TGZ_NAME}" "${URI_ROOT}/${TGZ_NAME}"
- sudo tar xpPf "/tmp/${TGZ_NAME}" --overwrite \
- --transform "s|${FLAVOR}|/usr/bin|" "${FLAVOR}/"{helm,tiller}
- rm -f "/tmp/${TGZ_NAME}"
-fi
--- /dev/null
+Foreword:
+---------
+This is a set of Openstack Heat templates which creates a simple topology of
+virtual machines to be used to deploy Kubernetes and Calico.
+
+It consists of one master VM and 2 optional slave VMs. In the future it might
+be possible to configure the number of slaves but for now it is fixed.
+
+
+Prerequisites:
+--------------
+In order to run these templates, you need an Openstack deployment (at least
+Ocata version, later is preferred), either a single node or a multinode
+installation.
+
+The job of the Heat stacks is to spawn either 1 or 3 VMs which will form a
+Kubernetes cluster. The base image is required to exist, by default the stacks
+will expect a Glance image named "xenial" to exist.
+
+It is required to upload an image prior to using the templates. Currently the
+templates operate on the assumption that an Ubuntu Xenial cloud image will be
+used, as such it installs the required packages using apt.
+
+See the main control.sh script to start/stop the set of stacks and various
+run-time options, like DPDK support.
+
+
+Usage:
+------
+For a DPDK enabled deployment, it is usually necessary to pass an extra metadata
+in the flavor (e.g. hw:mem_page_size=large). For DPDK usecase you also have to
+create a host aggregate which has the pinned=true metadata and add the desired
+compute nodes to this host aggregate.
+
+For floating IP support, you need to specify the name of the external network,
+otherwise the script will use the default "external".
+
+Example of running the script on a DPDK deployment:
+ has_dpdk=true external_net=ext_net ./control.sh start
+
+The set of templates currently define three stacks, which can be skipped from
+starting or stopping, if so desired. This makes it useful to skip deleting
+the nets or for starting the setup using one host only (master). E.g:
+ skip_k8s_net=1 ./control.sh stop
+ skip_k8s_slaves=1 ./control.sh start
+
+Networking:
+-----------
+Have a look at k8s_net.yaml for the network configurations.
+
+Currently the Heat templates define 2 networks:
+- k8s_mgmt_net: this is primarily used for sshing into the node, but it also
+ serves as the access to the external network. Thus the floating IPs (which are
+ activated by default) will be assigned to the ports from this network
+- k8s_int_net: kubernetes internal network, which is used by the nodes to join
+ the cluster.
+
+Separating the traffic into two networks makes sense in an Openstack environment
+by hiding the internal traffic from the outside world.
+Thus, for accessing the services inside the clusters, it will be required to use
+the floating IPs assigned to the Kubernetes servers.
+
+In terms of CNI, there will be two additional networks involved, which are
+defined in k8s_net.yaml. These networks are not visible from outside of the Heat
+stacks, Kubernetes and Calico will encapsulate packets on these networks using
+IP-in-IP. In fact, to Openstack these are virtual networks, the only reason to
+have them in k8s_pod_net.yaml is to have a central view of all the network
+parameters.
+The two networks are described by Heat stack output variables, as follows:
+- k8s_pod_net_cidr: the POD network, passed to kubeadm init --pod-network-cidr
+- k8s_svc_net_cidr: the service network, passed to kubeadm init --service-cidr
+
+
+Calico networking:
+------------------
+In terms of Calico, k8s_net.yaml defines yet another stack output variable:
+- k8s_cluster_ip: corresponds to the etcd_endpoints parameter in calico.yaml
+
+
+Network security:
+-----------------
+For the moment, for ease of operation, the stacks ports will have port security
+disabled. It should be possible to enable it, and provide a set of security
+groups rule to allow all TCP and UDP traffic for the internal network.
+
+
+Cluster setup:
+--------------
+The cluster configures itself automatically and installs the base IEC platform
+together with the needed resources for Helm. SEBA or other applications will
+have to be installed manually afterwards.
+
+For the K8s cluster setup, the Master VM will print the join command in a file
+in /home/ubuntu/joincmd. Then the slave VMs will connect to the Master VM using
+ssh and read the joincmd file.
+
+All of these are achieved by using cloud-init scripts that run at startup. You
+can follow the progress of the init scripts by looking at the console log, which
+right now is very verbose.
+After the setup is completed, you can look for the joincmd string in the output.
+
+
+Using the cluster:
+------------------
+Once the setup is complete, you can login to the k8s_master VM. Use the Horizon
+interface or ssh into the floating ip, using the default credentials:
+ubuntu:ubuntu
+
+A public key is also generated, and a private key saved in a file called
+ak-key.pem but for now password logins are permitted for ease of operation.
+
+Once logged into the master VM, you need to become root.
+ sudo su -
+
+From here it is possible to run the usual kubernetes and helm tools, thanks to
+having the KUBECONFIG env variable exported through /root/.profile.
+
+It is also possible to use kubernetes as non-root, in which case you need to
+manually create ~/.kube/ and copy the kubernetes config:
+ mkdir -p $HOME/.kube
+ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+ sudo chown $(id -u):$(id -g) $HOME/.kube/config
+
+The most basic operation you can run is verifying the nodes in the cluster:
+ kubectl get nodes
+ kubectl describe node k8s-master
--- /dev/null
+#!/bin/sh
+
+# Control script for a simple Heat-deployed Kubernetes cluster of VMs.
+# Every parameter below can be overridden from the environment, e.g.:
+#   has_dpdk=true external_net=ext_net ./control.sh start
+
+# shellcheck disable=SC2086
+
+# set DPDK if available
+has_dpdk=${has_dpdk:-"false"}
+
+################################################################
+# Stack parameters
+# NOTE(review): base_img and k8s_master_vol are not referenced below; the
+# templates hard-code image "xenial" and the volume is commented out —
+# confirm before relying on these overrides.
+base_img=${base_img:-"xenial"}
+key_name=${key_name:-"ak-key"}
+k8s_master_vol=${k8s_master_vol:-"k8s_master_vol"}
+external_net=${external_net:-"external"}
+# Credentials the slave init scripts use to ssh into the master VM.
+k8s_user=${k8s_user:-"ubuntu"}
+k8s_password=${k8s_password:-"ubuntu"}
+has_dpdk_param=
+
+floating_ip_param="--parameter public_ip_pool=$external_net"
+
+# Forward DPDK support to the Heat stacks only when requested.
+if [ "$has_dpdk" = true ]; then
+ has_dpdk_param="--parameter has_dpdk=true"
+fi
+
+################################################################
+
+# Trace commands and abort on the first failure.
+set -ex
+
+retries=5
+
+if [ -z "$OS_AUTH_URL" ]; then
+ echo "OS_AUTH_URL not set; aborting"
+ exit 1
+fi
+
+if ! [ -f ak-key.pem ]
+then
+ nova keypair-add "$key_name" > "$key_name".pem
+ chmod 600 ak-key.pem
+fi
+
+skip_k8s_net=${skip_k8s_net:-}
+skip_k8s_master=${skip_k8s_master:-}
+skip_k8s_slaves=${skip_k8s_slaves:-}
+
+stack_k8s_net=
+stack_k8s_master=
+stack_k8s_slaves=
+
+# First positional argument selects the action; anything else prints usage.
+case $1 in
+start|stop)
+ cmd=$1
+ shift
+ ;;
+restart)
+ # Re-invoke ourselves: retry "stop" until it succeeds (up to $retries
+ # attempts), then "start" with the same stack arguments.
+ shift
+ tries=0
+ while ! $0 stop "$@"; do
+ tries=$((tries+1))
+ if [ $tries -gt $retries ]; then
+ echo "Unable to stop demo, exiting"
+ exit 1
+ fi
+ done
+ $0 start "$@"
+ exit $?
+ ;;
+*)
+ echo "Control script for managing a simple K8s cluster of VMs using Heat"
+ echo "Available stacks:"
+ echo " - net - all the required networks and subnets"
+ echo " - k8s_master - K8s master VM"
+ echo " - k8s_slaves - configurable number of K8s slave VMs"
+ echo "Use skip_<stack> to skip starting/stopping stacks, e.g."
+ echo "#:~ > skip_k8s_net=1 ./$0 stop"
+ echo "usage: $0 [start|stop] [k8s_net] [k8s_master] [k8s_slaves]"
+ exit 1
+ ;;
+esac
+
+# Remaining arguments name stacks to act on: k8s_net is skipped by default
+# and each named stack has its skip flag cleared.
+# NOTE(review): only skip_k8s_net is force-set here — naming e.g. k8s_master
+# alone does NOT skip k8s_slaves; confirm whether that is intended.
+if [ $# -gt 0 ]; then
+ skip_k8s_net=1
+ while [ $# -gt 0 ]; do
+ eval unset skip_"$1"
+ shift
+ done
+fi
+
+# check OS status
+# Poll the compute service list until Openstack answers, giving up after
+# $retries attempts (5s apart).
+tries=0
+while ! openstack compute service list > /dev/null 2>&1; do
+ tries=$((tries+1))
+ if [ $tries -gt $retries ]; then
+ echo "Unable to check Openstack health, exiting"
+ exit 2
+ fi
+ sleep 5
+done
+
+# Record already-existing stacks as stack_<name>=1. The grep restricts names
+# to safe shell-identifier characters before they reach eval.
+for stack in $(openstack stack list -f value -c "Stack Name"); do
+ echo "$stack" | grep -sq -e '^[a-zA-Z0-9_]*$' && eval stack_"$stack"=1
+done
+
+# Perform the requested action; each stack is touched only when it is not
+# already in the desired state and not skipped.
+case $cmd in
+start)
+ if [ -z "$stack_k8s_net" ] && [ -z "$skip_k8s_net" ]; then
+ echo "Starting k8s_net"
+ openstack stack create --wait \
+ --parameter external_net="$external_net" \
+ -t k8s_net.yaml k8s_net
+ # Might need to wait for the networks to become available
+ # sleep 5
+ fi
+
+# master_vol=$(openstack volume show $k8s_master_vol -f value -c id)
+# --parameter volume_id=$master_vol \
+
+ # All cluster network parameters are published as outputs of k8s_net and
+ # forwarded to the master/slaves stacks below.
+ k8s_master_ip=$(openstack stack output show k8s_net k8s_master_ip -f value -c output_value)
+ k8s_pod_net_cidr=$(openstack stack output show k8s_net k8s_pod_net_cidr -f value -c output_value)
+ k8s_svc_net_cidr=$(openstack stack output show k8s_net k8s_svc_net_cidr -f value -c output_value)
+ k8s_cluster_ip=$(openstack stack output show k8s_net k8s_cluster_ip -f value -c output_value)
+ if [ -z "$stack_k8s_master" ] && [ -z "$skip_k8s_master" ]; then
+ echo "Starting Kubernetes master"
+ openstack stack create --wait \
+ --parameter key_name="$key_name" \
+ --parameter k8s_master_ip="$k8s_master_ip" \
+ --parameter k8s_pod_net_cidr="$k8s_pod_net_cidr" \
+ --parameter k8s_svc_net_cidr="$k8s_svc_net_cidr" \
+ --parameter k8s_cluster_ip="$k8s_cluster_ip" \
+ --parameter k8s_user="$k8s_user" \
+ --parameter k8s_password="$k8s_password" \
+ $floating_ip_param \
+ $has_dpdk_param \
+ -t k8s_master.yaml k8s_master
+ fi
+
+ if [ -z "$stack_k8s_slaves" ] && [ -z "$skip_k8s_slaves" ]; then
+ echo "Starting Kubernetes slaves"
+ openstack stack create --wait \
+ --parameter key_name="$key_name" \
+ --parameter k8s_master_ip="$k8s_master_ip" \
+ --parameter k8s_pod_net_cidr="$k8s_pod_net_cidr" \
+ --parameter k8s_svc_net_cidr="$k8s_svc_net_cidr" \
+ --parameter k8s_cluster_ip="$k8s_cluster_ip" \
+ --parameter k8s_user="$k8s_user" \
+ --parameter k8s_password="$k8s_password" \
+ $floating_ip_param \
+ $has_dpdk_param \
+ -t k8s_slaves.yaml k8s_slaves
+ fi
+
+ openstack stack list
+ ;;
+stop)
+ # Tear down in reverse dependency order: slaves, master, then networks.
+ if [ -n "$stack_k8s_slaves" ] && [ -z "$skip_k8s_slaves" ]; then
+ echo "Stopping Kubernetes slaves"
+ openstack stack delete --yes --wait k8s_slaves
+ fi
+
+ if [ -n "$stack_k8s_master" ] && [ -z "$skip_k8s_master" ]; then
+ echo "Stopping Kubernetes master"
+ openstack stack delete --yes --wait k8s_master
+ fi
+
+ if [ -n "$stack_k8s_net" ] && [ -z "$skip_k8s_net" ]; then
+ echo "Stopping k8s_net"
+ openstack stack delete --yes --wait k8s_net
+ fi
+
+ openstack stack list
+ ;;
+esac
--- /dev/null
+# yamllint disable-line rule:document-start
+heat_template_version: 2016-10-14
+
+description: "K8 master VM"
+
+parameters:
+ key_name:
+ type: string
+ description: management ssh key
+ default: 'ak-key'
+
+ k8s_master_hostname:
+ type: string
+ description: Hostname of the K8s master node
+ default: "k8s-master"
+
+ k8s_master_vol:
+ type: string
+ default: "k8s_master_vol"
+
+ k8s_mgmt_net:
+ type: string
+ description: management network
+ default: "k8s_mgmt_net"
+
+ k8s_int_net:
+ type: string
+ description: Kubernetes service network
+ default: "k8s_int_net"
+
+ k8s_master_ip:
+ type: string
+ description: k8s_master management IP (fixed)
+
+ k8s_pod_net_cidr:
+ type: string
+ description: k8 pod_net cidr used for setting up k8s cluster
+
+ k8s_svc_net_cidr:
+ type: string
+ description: k8s svc_net cidr used for setting up k8s cluster
+
+ k8s_cluster_ip:
+ type: string
+ description: k8 service IP addr used for setting up k8s cluster
+
+ k8s_user:
+ type: string
+ description: User id to connect to the VMs (ssh)
+ default: "ubuntu"
+
+ k8s_password:
+ type: string
+ description: Access password for the user to connect to the VMs (ssh)
+ default: "ubuntu"
+
+ public_ip_pool:
+ type: string
+ description: Public IP pool
+ default: "external"
+
+ enable_floating_ip:
+ type: boolean
+ default: true
+
+ has_dpdk:
+ type: boolean
+ default: false
+
+conditions:
+ cond_floating_ip: {equals: [{get_param: enable_floating_ip}, true]}
+ has_dpdk: {equals: [{get_param: has_dpdk}, true]}
+
+resources:
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 16384
+ vcpus: 4
+ disk: 10
+
+ flavor_dpdk:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 16384
+ vcpus: 8
+ disk: 40
+ extra_specs:
+ "hw:mem_page_size": large
+ "hw:cpu_policy": dedicated
+ "aggregate_instance_extra_specs:pinned": "true"
+ "hw:numa_node.0": 0
+ "hw:numa_nodes": 1
+
+ server_fip:
+ type: OS::Nova::FloatingIP
+ condition: cond_floating_ip
+ properties:
+ pool: {get_param: public_ip_pool}
+
+ server_association_fip:
+ type: OS::Nova::FloatingIPAssociation
+ condition: cond_floating_ip
+ properties:
+ floating_ip: {get_resource: server_fip}
+ server_id: {get_resource: server}
+
+ mgmt_port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_mgmt_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+
+ int_net_port:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_int_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+ fixed_ips: [{"ip_address": {get_param: k8s_master_ip}}]
+
+ server_cloudinit_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ password: ubuntu
+ chpasswd: {expire: false}
+ ssh_pwauth: true
+ manage_etc_hosts: true
+ disable_root: false
+
+ server_config:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace:
+ template: {get_file: k8s_master_init.sh}
+ params:
+ k8s_master_hostname: {get_param: k8s_master_hostname}
+ k8s_master_ip: {get_param: k8s_master_ip}
+ k8s_pod_net_cidr: {get_param: k8s_pod_net_cidr}
+ k8s_svc_net_cidr: {get_param: k8s_svc_net_cidr}
+ k8s_cluster_ip: {get_param: k8s_cluster_ip}
+ k8s_user: {get_param: k8s_user}
+
+ server_user_data:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: server_cloudinit_config}
+ - config: {get_resource: server_config}
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Security group for ssh and icmp
+ name: test-security-group
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}
+ ]
+
+ # k8s_master_volume:
+ # type: OS::Cinder::Volume
+ # properties:
+ # description: 'user: Volume for Node1'
+ # image: "xenial"
+ # name: {get_param: k8s_master_vol}
+ # size: 20
+ # availability_zone: nova
+
+ server:
+ type: OS::Nova::Server
+ properties:
+ name: k8s-master
+ key_name: {get_param: key_name}
+ flavor: {get_resource: {if: ["has_dpdk", "flavor_dpdk", "flavor"]}}
+ image: "xenial"
+ # block_device_mapping: [
+ # {device_name: "vda",
+ # volume_id:
+ # {get_resource: k8s_master_volume},
+ # delete_on_termination: true
+ # }
+ # ]
+ user_data: {get_resource: server_user_data}
+ user_data_format: RAW
+ networks:
+ - port: {get_resource: mgmt_port}
+ - port: {get_resource: int_net_port}
--- /dev/null
+#!/bin/bash
+# Cloud-init payload for the K8s master VM. The lowercase tokens below are
+# placeholders substituted by Heat's str_replace before execution.
+set -ex
+# Point the hostname entry in /etc/hosts at the cluster-internal address
+# instead of the one cloud-init picked.
+sed -i -e 's/^\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\)\([\t ]\+\)\(k8s_master_hostname.*$\)/k8s_master_ip\2\3/g' /etc/hosts
+apt update
+pwd
+# Looks like cloud-init does not set $HOME, so we can hack it into thinking it's /root
+HOME=${HOME:-/root}
+export HOME
+# Fetch the IEC foundation scripts and bootstrap the cluster with them.
+git clone https://gerrit.akraino.org/r/iec
+cd iec/src/foundation/scripts
+./k8s_common.sh
+./k8s_master.sh k8s_master_ip k8s_pod_net_cidr k8s_svc_net_cidr
+# Pick up KUBECONFIG exported by the setup scripts.
+. ${HOME}/.profile
+./setup-cni.sh k8s_cluster_ip k8s_pod_net_cidr
+# Reconstruct the kubeadm join command (token + CA public-key hash) and
+# publish it where the slave VMs poll for it over ssh.
+token=$(kubeadm token list --skip-headers | awk 'END{print $1}')
+shaid=$(openssl x509 -in /etc/kubernetes/pki/ca.crt -noout -pubkey | openssl rsa -pubin -outform DER 2>/dev/null | sha256sum | cut -d ' ' -f1)
+# NOTE(review): the slaves read /home/ubuntu/joincmd; this path matches only
+# while the substituted user is "ubuntu" — confirm if the user ever changes.
+echo "kubeadm join k8s_master_ip:6443 --token $token --discovery-token-ca-cert-hash sha256:$shaid" > /home/k8s_user/joincmd
+cat /home/k8s_user/joincmd
+./nginx.sh
+./helm.sh
--- /dev/null
+# yamllint disable-line rule:document-start
+heat_template_version: 2015-04-30
+
+parameters:
+ external_net:
+ type: string
+ description: Name of the external network
+ default: "external"
+
+resources:
+
+ k8s_mgmt_net:
+ type: OS::Neutron::Net
+ properties:
+ name: "k8s_mgmt_net"
+
+ k8s_mgmt_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: {get_resource: k8s_mgmt_net}
+ cidr: "192.168.11.0/24"
+ gateway_ip: 192.168.11.254
+ ip_version: 4
+
+ k8s_mgmt_router:
+ type: OS::Neutron::Router
+ properties:
+ external_gateway_info: {network: {get_param: external_net}}
+
+ k8s_mgmt_interface:
+ type: OS::Neutron::RouterInterface
+ properties:
+ router_id: {get_resource: k8s_mgmt_router}
+ subnet: {get_resource: k8s_mgmt_subnet}
+
+ k8s_int_net:
+ type: OS::Neutron::Net
+ properties:
+ name: "k8s_int_net"
+
+ k8s_int_subnet:
+ type: OS::Neutron::Subnet
+ properties:
+ network_id: {get_resource: k8s_int_net}
+ cidr: "172.16.10.0/24"
+ gateway_ip: null
+ allocation_pools:
+ - start: 172.16.10.10
+ end: 172.16.10.253
+ ip_version: 4
+ enable_dhcp: false
+
+outputs:
+ k8s_master_ip:
+ value: "172.16.10.36"
+ k8s_pod_net_cidr:
+ value: "100.100.0.0/16"
+ k8s_svc_net_cidr:
+ value: "172.16.1.0/24"
+ k8s_cluster_ip:
+ value: "172.16.1.136"
--- /dev/null
+# yamllint disable-line rule:document-start
+heat_template_version: 2016-10-14
+
+description: "K8 slaves VM"
+
+parameters:
+ key_name:
+ type: string
+ description: management ssh key
+ default: 'ak-key'
+
+ k8s_slave0_hostname:
+ type: string
+ description: Hostname of the K8s slave0 node
+ default: "k8s-slave0"
+
+ k8s_slave1_hostname:
+ type: string
+ description: Hostname of the K8s slave1 node
+ default: "k8s-slave1"
+
+ k8s_mgmt_net:
+ type: string
+ description: management network
+ default: "k8s_mgmt_net"
+
+ k8s_int_net:
+ type: string
+ description: Kubernetes service network
+ default: "k8s_int_net"
+
+ k8s_master_ip:
+ type: string
+ description: k8s_master management IP (fixed)
+
+ k8s_slave0_ip:
+ type: string
+ description: k8s_slave0 management IP (fixed)
+ default: "172.16.10.37"
+
+ k8s_slave1_ip:
+ type: string
+ description: k8s_slave1 management IP (fixed)
+ default: "172.16.10.38"
+
+ k8s_pod_net_cidr:
+ type: string
+ description: k8 pod_net cidr used for setting up k8s cluster
+
+ k8s_svc_net_cidr:
+ type: string
+ description: k8s svc_net cidr used for setting up k8s cluster
+
+ k8s_cluster_ip:
+ type: string
+ description: k8 service IP addr used for setting up k8s cluster
+
+ k8s_user:
+ type: string
+ description: User id to connect to the VMs (ssh)
+ default: "ubuntu"
+
+ k8s_password:
+ type: string
+ description: Access password for the user to connect to the VMs (ssh)
+ default: "ubuntu"
+
+ public_ip_pool:
+ type: string
+ description: Public IP pool
+ default: "external"
+
+ enable_floating_ip:
+ type: boolean
+ default: true
+
+ has_dpdk:
+ type: boolean
+ default: false
+
+conditions:
+ cond_floating_ip: {equals: [{get_param: enable_floating_ip}, true]}
+ has_dpdk: {equals: [{get_param: has_dpdk}, true]}
+
+resources:
+ flavor:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 10240
+ vcpus: 4
+ disk: 10
+
+ flavor_dpdk:
+ type: OS::Nova::Flavor
+ properties:
+ ram: 10240
+ vcpus: 8
+ disk: 40
+ extra_specs:
+ "hw:mem_page_size": large
+ "hw:cpu_policy": dedicated
+ "aggregate_instance_extra_specs:pinned": "true"
+ "hw:numa_node.0": 0
+ "hw:numa_nodes": 1
+
+ server_cloudinit_config:
+ type: OS::Heat::CloudConfig
+ properties:
+ cloud_config:
+ password: ubuntu
+ chpasswd: {expire: false}
+ ssh_pwauth: true
+ manage_etc_hosts: true
+ disable_root: false
+
+ server_config0:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace:
+ template: {get_file: k8s_slaves_init.sh}
+ params:
+ k8s_slave_hostname: {get_param: k8s_slave0_hostname}
+ k8s_master_ip: {get_param: k8s_master_ip}
+ k8s_slave_ip: {get_param: k8s_slave0_ip}
+ k8s_pod_net_cidr: {get_param: k8s_pod_net_cidr}
+ k8s_svc_net_cidr: {get_param: k8s_svc_net_cidr}
+ k8s_cluster_ip: {get_param: k8s_cluster_ip}
+ k8s_user: {get_param: k8s_user}
+ k8s_password: {get_param: k8s_password}
+
+ server_user_data0:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: server_cloudinit_config}
+ - config: {get_resource: server_config0}
+
+ server_config1:
+ type: OS::Heat::SoftwareConfig
+ properties:
+ config:
+ str_replace:
+ template: {get_file: k8s_slaves_init.sh}
+ params:
+ k8s_slave_hostname: {get_param: k8s_slave1_hostname}
+ k8s_master_ip: {get_param: k8s_master_ip}
+ k8s_slave_ip: {get_param: k8s_slave1_ip}
+ k8s_pod_net_cidr: {get_param: k8s_pod_net_cidr}
+ k8s_svc_net_cidr: {get_param: k8s_svc_net_cidr}
+ k8s_cluster_ip: {get_param: k8s_cluster_ip}
+ k8s_user: {get_param: k8s_user}
+ k8s_password: {get_param: k8s_password}
+
+ server_user_data1:
+ type: OS::Heat::MultipartMime
+ properties:
+ parts:
+ - config: {get_resource: server_cloudinit_config}
+ - config: {get_resource: server_config1}
+
+ server_security_group:
+ type: OS::Neutron::SecurityGroup
+ properties:
+ description: Security group for ssh and icmp
+ name: test-security-group
+ rules: [
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: tcp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0,
+ protocol: udp,
+ port_range_min: 1,
+ port_range_max: 65535},
+ {remote_ip_prefix: 0.0.0.0/0, protocol: icmp}
+ ]
+
+ slave_fip0:
+ type: OS::Nova::FloatingIP
+ condition: cond_floating_ip
+ properties:
+ pool: {get_param: public_ip_pool}
+
+ server_association_fip0:
+ type: OS::Nova::FloatingIPAssociation
+ condition: cond_floating_ip
+ properties:
+ floating_ip: {get_resource: slave_fip0}
+ server_id: {get_resource: slave0}
+
+ slave_fip1:
+ type: OS::Nova::FloatingIP
+ condition: cond_floating_ip
+ properties:
+ pool: {get_param: public_ip_pool}
+
+ server_association_fip1:
+ type: OS::Nova::FloatingIPAssociation
+ condition: cond_floating_ip
+ properties:
+ floating_ip: {get_resource: slave_fip1}
+ server_id: {get_resource: slave1}
+
+ mgmt_port0:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_mgmt_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+
+ int_net_port0:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_int_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+ fixed_ips: [{"ip_address": {get_param: k8s_slave0_ip}}]
+
+ mgmt_port1:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_mgmt_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+
+ int_net_port1:
+ type: OS::Neutron::Port
+ properties:
+ network: {get_param: k8s_int_net}
+ port_security_enabled: false
+ # security_groups:
+ # - {get_resource: server_security_group}
+ fixed_ips: [{"ip_address": {get_param: k8s_slave1_ip}}]
+
+ slave0:
+ type: OS::Nova::Server
+ properties:
+ name: "k8s-slave0"
+ key_name: {get_param: key_name}
+ flavor: {get_resource: {if: ["has_dpdk", "flavor_dpdk", "flavor"]}}
+ image: "xenial"
+ user_data: {get_resource: server_user_data0}
+ user_data_format: RAW
+ # security_groups:
+ # - {get_resource: server_security_group}
+ networks:
+ - port: {get_resource: mgmt_port0}
+ - port: {get_resource: int_net_port0}
+
+ slave1:
+ type: OS::Nova::Server
+ properties:
+ name: "k8s-slave1"
+ key_name: {get_param: key_name}
+ flavor: {get_resource: {if: ["has_dpdk", "flavor_dpdk", "flavor"]}}
+ image: "xenial"
+ user_data: {get_resource: server_user_data1}
+ user_data_format: RAW
+ # security_groups:
+ # - {get_resource: server_security_group}
+ networks:
+ - port: {get_resource: mgmt_port1}
+ - port: {get_resource: int_net_port1}
--- /dev/null
+#!/bin/bash
+set -ex
+echo "K8s Master IP is k8s_master_ip"
+sudo sed -i -e 's/^\([0-9]\+\.[0-9]\+\.[0-9]\+\.[0-9]\+\)\([\t ]\+\)\(k8s_slave_hostname.*$\)/k8s_slave_ip\2\3/g' /etc/hosts
+apt update
+apt install sshpass
+pwd
+git clone https://gerrit.akraino.org/r/iec
+cd iec/src/foundation/scripts
+./k8s_common.sh
+joincmd=$(sshpass -p k8s_password ssh -o StrictHostKeyChecking=no k8s_user@k8s_master_ip 'for i in {1..300}; do if [ -f /home/ubuntu/joincmd ]; then break; else sleep 1; fi; done; cat /home/ubuntu/joincmd')
+eval sudo $joincmd
--- /dev/null
+# Calico Version v3.3.2
+# https://docs.projectcalico.org/v3.3/releases#v3.3.2
+# This manifest includes the following component versions:
+# calico/node:v3.3.2
+# calico/cni:v3.3.2
+# calico/kube-controllers:v3.3.2
+
+# This ConfigMap is used to configure a self-hosted Calico installation.
+kind: ConfigMap
+apiVersion: v1
+metadata:
+ name: calico-config
+ namespace: kube-system
+data:
+ # Configure this with the location of your etcd cluster.
+ etcd_endpoints: "http://10.96.232.136:6666"
+
+ # If you're using TLS enabled etcd uncomment the following.
+ # You must also populate the Secret below with these files.
+ etcd_ca: "" # "/calico-secrets/etcd-ca"
+ etcd_cert: "" # "/calico-secrets/etcd-cert"
+ etcd_key: "" # "/calico-secrets/etcd-key"
+ # Configure the Calico backend to use.
+ calico_backend: "bird"
+
+ # Configure the MTU to use
+ veth_mtu: "1440"
+
+ # The CNI network configuration to install on each node. The special
+ # values in this config will be automatically populated.
+ cni_network_config: |-
+ {
+ "name": "k8s-pod-network",
+ "cniVersion": "0.3.0",
+ "plugins": [
+ {
+ "type": "calico",
+ "log_level": "info",
+ "etcd_endpoints": "__ETCD_ENDPOINTS__",
+ "etcd_key_file": "__ETCD_KEY_FILE__",
+ "etcd_cert_file": "__ETCD_CERT_FILE__",
+ "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
+ "mtu": __CNI_MTU__,
+ "ipam": {
+ "type": "calico-ipam"
+ },
+ "policy": {
+ "type": "k8s"
+ },
+ "kubernetes": {
+ "kubeconfig": "__KUBECONFIG_FILEPATH__"
+ }
+ },
+ {
+ "type": "portmap",
+ "snat": true,
+ "capabilities": {"portMappings": true}
+ }
+ ]
+ }
+
+---
+
+
+# The following contains k8s Secrets for use with a TLS enabled etcd cluster.
+# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/
+apiVersion: v1
+kind: Secret
+type: Opaque
+metadata:
+ name: calico-etcd-secrets
+ namespace: kube-system
+data:
+ # Populate the following files with etcd TLS configuration if desired, but leave blank if
+ # not using TLS for etcd.
+ # This self-hosted install expects three files with the following names. The values
+ # should be base64 encoded strings of the entire contents of each file.
+ # etcd-key: null
+ # etcd-cert: null
+ # etcd-ca: null
+
+---
+
+# This manifest installs the calico/node container, as well
+# as the Calico CNI plugins and network config on
+# each master and worker node in a Kubernetes cluster.
+kind: DaemonSet
+apiVersion: extensions/v1beta1
+metadata:
+ name: calico-node
+ namespace: kube-system
+ labels:
+ k8s-app: calico-node
+spec:
+ selector:
+ matchLabels:
+ k8s-app: calico-node
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ maxUnavailable: 1
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-node
+ annotations:
+ # This, along with the CriticalAddonsOnly toleration below,
+ # marks the pod as a critical add-on, ensuring it gets
+ # priority scheduling and that its resources are reserved
+ # if it ever gets evicted.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ hostNetwork: true
+ tolerations:
+ # Make sure calico-node gets scheduled on all nodes.
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ serviceAccountName: calico-node
+ # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+ # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+ terminationGracePeriodSeconds: 0
+ containers:
+ # Runs calico/node container on each Kubernetes node. This
+ # container programs network policy and routes on each
+ # host.
+ - name: calico-node
+ image: calico/node:v3.3.2
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Location of the CA certificate for etcd.
+ - name: ETCD_CA_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_ca
+ # Location of the client key for etcd.
+ - name: ETCD_KEY_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_key
+ # Location of the client certificate for etcd.
+ - name: ETCD_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_cert
+ # Set noderef for node controller.
+ - name: CALICO_K8S_NODE_REF
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # Choose the backend to use.
+ - name: CALICO_NETWORKING_BACKEND
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: calico_backend
+ # Cluster type to identify the deployment type
+ - name: CLUSTER_TYPE
+ value: "k8s,bgp"
+ # Auto-detect the BGP IP address.
+ - name: IP
+ value: "autodetect"
+ - name: IP_AUTODETECTION_METHOD
+ value: "can-reach=www.google.com"
+ # Enable IPIP
+ - name: CALICO_IPV4POOL_IPIP
+ value: "Always"
+ # Set MTU for tunnel device used if ipip is enabled
+ - name: FELIX_IPINIPMTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ # The default IPv4 pool to create on startup if none exists. Pod IPs will be
+ # chosen from this range. Changing this value after installation will have
+ # no effect. This should fall within `--cluster-cidr`.
+ - name: CALICO_IPV4POOL_CIDR
+ value: "192.168.0.0/16"
+ # Disable file logging so `kubectl logs` works.
+ - name: CALICO_DISABLE_FILE_LOGGING
+ value: "true"
+ # Set Felix endpoint to host default action to ACCEPT.
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
+ value: "ACCEPT"
+ # Disable IPv6 on Kubernetes.
+ - name: FELIX_IPV6SUPPORT
+ value: "false"
+ # Set Felix logging to "info"
+ - name: FELIX_LOGSEVERITYSCREEN
+ value: "info"
+ - name: FELIX_HEALTHENABLED
+ value: "true"
+ securityContext:
+ privileged: true
+ resources:
+ requests:
+ cpu: 250m
+ livenessProbe:
+ httpGet:
+ path: /liveness
+ port: 9099
+ host: localhost
+ periodSeconds: 10
+ initialDelaySeconds: 10
+ failureThreshold: 6
+ readinessProbe:
+ exec:
+ command:
+ - /bin/calico-node
+ - -bird-ready
+ - -felix-ready
+ periodSeconds: 10
+ volumeMounts:
+ - mountPath: /lib/modules
+ name: lib-modules
+ readOnly: true
+ - mountPath: /run/xtables.lock
+ name: xtables-lock
+ readOnly: false
+ - mountPath: /var/run/calico
+ name: var-run-calico
+ readOnly: false
+ - mountPath: /var/lib/calico
+ name: var-lib-calico
+ readOnly: false
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ # This container installs the Calico CNI binaries
+ # and CNI network config file on each node.
+ - name: install-cni
+ image: calico/cni:v3.3.2
+ command: ["/install-cni.sh"]
+ env:
+ # Name of the CNI config file to create.
+ - name: CNI_CONF_NAME
+ value: "10-calico.conflist"
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # The CNI network config to install on each node.
+ - name: CNI_NETWORK_CONFIG
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: cni_network_config
+ # CNI MTU Config variable
+ - name: CNI_MTU
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: veth_mtu
+ volumeMounts:
+ - mountPath: /host/opt/cni/bin
+ name: cni-bin-dir
+ - mountPath: /host/etc/cni/net.d
+ name: cni-net-dir
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ volumes:
+ # Used by calico/node.
+ - name: lib-modules
+ hostPath:
+ path: /lib/modules
+ - name: var-run-calico
+ hostPath:
+ path: /var/run/calico
+ - name: var-lib-calico
+ hostPath:
+ path: /var/lib/calico
+ - name: xtables-lock
+ hostPath:
+ path: /run/xtables.lock
+ type: FileOrCreate
+ # Used to install CNI.
+ - name: cni-bin-dir
+ hostPath:
+ path: /opt/cni/bin
+ - name: cni-net-dir
+ hostPath:
+ path: /etc/cni/net.d
+ # Mount in the etcd TLS secrets with mode 400.
+ # See https://kubernetes.io/docs/concepts/configuration/secret/
+ - name: etcd-certs
+ secret:
+ secretName: calico-etcd-secrets
+ defaultMode: 0400
+---
+
+# ServiceAccount referenced by the calico-node DaemonSet above and bound
+# by the calico-node ClusterRoleBinding in rbac.yaml.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-node
+ namespace: kube-system
+
+---
+
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
+# NOTE(review): extensions/v1beta1 Deployments are deprecated in newer
+# Kubernetes releases; this matches the K8s 1.13 pinned by these scripts —
+# revisit if KUBE_VERSION is bumped.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ annotations:
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+spec:
+ # The controllers can only have a single active instance.
+ replicas: 1
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+ labels:
+ k8s-app: calico-kube-controllers
+ spec:
+ nodeSelector:
+ beta.kubernetes.io/os: linux
+ # The controllers must run in the host network namespace so that
+ # it isn't governed by policy that would prevent it from working.
+ hostNetwork: true
+ tolerations:
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ serviceAccountName: calico-kube-controllers
+ containers:
+ - name: calico-kube-controllers
+ image: calico/kube-controllers:v3.3.2
+ env:
+ # The location of the Calico etcd cluster.
+ - name: ETCD_ENDPOINTS
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_endpoints
+ # Location of the CA certificate for etcd.
+ - name: ETCD_CA_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_ca
+ # Location of the client key for etcd.
+ - name: ETCD_KEY_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_key
+ # Location of the client certificate for etcd.
+ - name: ETCD_CERT_FILE
+ valueFrom:
+ configMapKeyRef:
+ name: calico-config
+ key: etcd_cert
+ # Choose which controllers to run.
+ - name: ENABLED_CONTROLLERS
+ value: policy,namespace,serviceaccount,workloadendpoint,node
+ volumeMounts:
+ # Mount in the etcd TLS secrets.
+ - mountPath: /calico-secrets
+ name: etcd-certs
+ readinessProbe:
+ exec:
+ command:
+ - /usr/bin/check-status
+ - -r
+ volumes:
+ # Mount in the etcd TLS secrets with mode 400.
+ # See https://kubernetes.io/docs/concepts/configuration/secret/
+ - name: etcd-certs
+ secret:
+ secretName: calico-etcd-secrets
+ defaultMode: 0400
+
+---
+
+# ServiceAccount referenced by the calico-kube-controllers Deployment above.
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: calico-kube-controllers
+ namespace: kube-system
+
--- /dev/null
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+# NOTE(review): this is a single etcd instance with plain-HTTP listeners and
+# hostPath storage under /var/etcd — fine for a test cluster, not production.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # This taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the Calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Allow this pod to run on the master.
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.3.9
+ env:
+ # Pod IP, used to advertise the client URL below.
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ command:
+ - /usr/local/bin/etcd
+ args:
+ - --name=calico
+ - --data-dir=/var/etcd/calico-data
+ - --advertise-client-urls=http://$(CALICO_ETCD_IP):6666
+ - --listen-client-urls=http://0.0.0.0:6666
+ - --listen-peer-urls=http://0.0.0.0:6667
+ - --auto-compaction-retention=1
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ # Persist etcd data on the master's filesystem.
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ # NOTE(review): setup-cni.sh rewrites 10.96.232.136 via sed to match the
+ # deployment's service CIDR.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
--- /dev/null
+# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
+# to force it to run on the master even when the master isn't schedulable, and uses
+# nodeSelector to ensure it only runs on the master.
+# arm64 variant: differs from etcd-amd64.yaml only in the image tag and the
+# ETCD_UNSUPPORTED_ARCH environment variable.
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: calico-etcd
+ namespace: kube-system
+ labels:
+ k8s-app: calico-etcd
+spec:
+ template:
+ metadata:
+ labels:
+ k8s-app: calico-etcd
+ annotations:
+ # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
+ # reserves resources for critical add-on pods so that they can be rescheduled after
+ # a failure. This annotation works in tandem with the toleration below.
+ scheduler.alpha.kubernetes.io/critical-pod: ''
+ spec:
+ tolerations:
+ # This taint is set by all kubelets running `--cloud-provider=external`
+ # so we should tolerate it to schedule the Calico pods
+ - key: node.cloudprovider.kubernetes.io/uninitialized
+ value: "true"
+ effect: NoSchedule
+ # Allow this pod to run on the master.
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
+ # This, along with the annotation above marks this pod as a critical add-on.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ # Only run this pod on the master.
+ nodeSelector:
+ node-role.kubernetes.io/master: ""
+ hostNetwork: true
+ containers:
+ - name: calico-etcd
+ image: quay.io/coreos/etcd:v3.3.9-arm64
+ env:
+ # Pod IP, used to advertise the client URL below.
+ - name: CALICO_ETCD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ # Required so etcd will start on arm64, which upstream etcd treats
+ # as an unsupported architecture (see etcd supported-platforms docs).
+ - name: ETCD_UNSUPPORTED_ARCH
+ value: "arm64"
+ command:
+ - /usr/local/bin/etcd
+ args:
+ - --name=calico
+ - --data-dir=/var/etcd/calico-data
+ - --advertise-client-urls=http://$(CALICO_ETCD_IP):6666
+ - --listen-client-urls=http://0.0.0.0:6666
+ - --listen-peer-urls=http://0.0.0.0:6667
+ - --auto-compaction-retention=1
+ volumeMounts:
+ - name: var-etcd
+ mountPath: /var/etcd
+ volumes:
+ # Persist etcd data on the master's filesystem.
+ - name: var-etcd
+ hostPath:
+ path: /var/etcd
+
+---
+
+# This manifest installs the Service which gets traffic to the Calico
+# etcd.
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ k8s-app: calico-etcd
+ name: calico-etcd
+ namespace: kube-system
+spec:
+ # Select the calico-etcd pod running on the master.
+ selector:
+ k8s-app: calico-etcd
+ # This ClusterIP needs to be known in advance, since we cannot rely
+ # on DNS to get access to etcd.
+ # NOTE(review): setup-cni.sh rewrites 10.96.232.136 via sed to match the
+ # deployment's service CIDR.
+ clusterIP: 10.96.232.136
+ ports:
+ - port: 6666
--- /dev/null
+# Calico Version v3.3.2
+# https://docs.projectcalico.org/v3.3/releases#v3.3.2
+# RBAC roles and bindings for the calico-kube-controllers Deployment and
+# the calico-node DaemonSet (service accounts live in calico.yaml).
+
+---
+
+# Read-only access to the resources the controllers watch.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+rules:
+ - apiGroups:
+ - ""
+ - extensions
+ resources:
+ - pods
+ - namespaces
+ - networkpolicies
+ - nodes
+ - serviceaccounts
+ verbs:
+ - watch
+ - list
+ - apiGroups:
+ - networking.k8s.io
+ resources:
+ - networkpolicies
+ verbs:
+ - watch
+ - list
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-kube-controllers
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-kube-controllers
+subjects:
+- kind: ServiceAccount
+ name: calico-kube-controllers
+ namespace: kube-system
+
+---
+
+# Minimal permissions for calico-node: read pods/nodes/namespaces and
+# patch node status.
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: calico-node
+rules:
+ - apiGroups: [""]
+ resources:
+ - pods
+ - nodes
+ - namespaces
+ verbs:
+ - get
+ - apiGroups: [""]
+ resources:
+ - nodes/status
+ verbs:
+ - patch
+
+---
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+ name: calico-node
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: calico-node
+subjects:
+- kind: ServiceAccount
+ name: calico-node
+ namespace: kube-system
--- /dev/null
+#!/bin/bash
+# Deployment settings sourced by startup.sh.
+# NOTE(review): passwords are stored here in plain text and handed to
+# sshpass on the command line; keep this file private, or switch the
+# deployment to SSH keys.
+
+# Host user which can log into master and each worker nodes
+HOST_USER=${HOST_USER:-iec}
+
+# Git repository cloned onto every node by startup.sh.
+REPO_URL="https://gerrit.akraino.org/r/iec"
+#log file
+LOG_FILE="kubeadm.log"
+
+
+# Master node IP address
+K8S_MASTER_IP="10.169.36.152"
+
+# HOST_USER's password on Master node
+K8S_MASTERPW="123456"
+
+######################################################
+#
+# K8S_WORKER_GROUP is an array which consists of 2
+# parts. One is k8s worker ip address. The other is
+# the user password.
+#
+######################################################
+K8S_WORKER_GROUP=(
+"10.169.40.106,123456"
+)
+
+
--- /dev/null
+#!/bin/bash
+# Run the functest-kubernetes on master node for checking
+# K8s environments
+#
+# Usage: ./functest.sh <k8s-master-ip>
+set -e
+
+K8S_MASTER_IP=$1
+
+
+if [ -z "${K8S_MASTER_IP}" ]
+then
+ echo "Error:K8S_MASTER_IP is empty."
+ echo "Please input the k8s master ip address."
+ echo "Just as:"
+ echo "./functest.sh 10.1.1.1"
+ exit 1
+fi
+
+
+# Credentials consumed by functest inside the container (mounted as its
+# env_file below).
+cat <<EOF > "${HOME}/k8.creds"
+export KUBERNETES_PROVIDER=local
+export KUBE_MASTER_URL=https://${K8S_MASTER_IP}:6443
+export KUBE_MASTER_IP=${K8S_MASTER_IP}
+EOF
+
+# Test results are written here by the container.
+mkdir -p "${HOME}/functest/results"
+
+# Run the OPNFV functest-kubernetes healthcheck suite against this
+# cluster, reusing the local kubeconfig (~/.kube/config).
+sudo docker run --rm -e DEPLOY_SCENARIO=k8-nosdn-nofeature-noha \
+ -v "${HOME}/k8.creds:/home/opnfv/functest/conf/env_file" \
+ -v "${HOME}/functest/results:/home/opnfv/functest/results" \
+ -v "${HOME}/.kube/config:/root/.kube/config" opnfv/functest-kubernetes-healthcheck:latest \
+ /bin/bash -c 'run_tests -r -t all'
--- /dev/null
+#!/bin/bash -ex
+# Install Helm v2 (helm + tiller binaries) for the local CPU architecture,
+# create the tiller ServiceAccount/ClusterRoleBinding, and run "helm init".
+
+VERSION='v2.12.3'
+TILLER_SA_RBAC=~/tiller-rbac.yaml
+# Pick the release tarball flavor matching the local CPU architecture.
+if [ "$(uname -m)" == 'aarch64' ]; then
+ FLAVOR='linux-arm64'
+else
+ FLAVOR='linux-amd64'
+fi
+
+URI_ROOT='https://storage.googleapis.com/kubernetes-helm'
+TGZ_NAME="helm-${VERSION}-${FLAVOR}.tar.gz"
+
+# Download and unpack helm/tiller straight into /usr/bin; the --transform
+# rewrites the "linux-<arch>/" prefix inside the tarball to /usr/bin.
+if [ ! -e /usr/bin/helm ] || [ ! -e /usr/bin/tiller ]; then
+ wget -O "/tmp/${TGZ_NAME}" "${URI_ROOT}/${TGZ_NAME}"
+ sudo tar xpPf "/tmp/${TGZ_NAME}" --overwrite \
+ --transform "s|${FLAVOR}|/usr/bin|" "${FLAVOR}/"{helm,tiller}
+ rm -f "/tmp/${TGZ_NAME}"
+fi
+
+# Create the tiller ServiceAccount + ClusterRoleBinding once, then init
+# helm with a multi-arch tiller image (jessestuart/tiller).
+# NOTE(review): binding tiller to cluster-admin grants full cluster access.
+if ! kubectl get serviceaccounts --namespace=kube-system | grep -q tiller; then
+ cat <<EOF > "${TILLER_SA_RBAC}"
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: tiller
+ namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: tiller
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cluster-admin
+subjects:
+ - kind: ServiceAccount
+ name: tiller
+ namespace: kube-system
+EOF
+ kubectl create -f "${TILLER_SA_RBAC}"
+ helm init --service-account tiller --tiller-image="jessestuart/tiller:${VERSION}"
+fi
+rm -f "${TILLER_SA_RBAC}"
DOCKER_VERSION=18.06.1~ce~3-0~ubuntu
KUBE_VERSION=1.13.0-00
+K8S_CNI_VERSION=0.6.0-00
# Install Docker as Prerequisite
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt update
+# Minor fix for broken kubernetes-cni dependency in upstream xenial repo
sudo apt install -y \
- kubelet=${KUBE_VERSION} kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
-apt-mark hold kubelet kubeadm kubectl
+ kubernetes-cni=${K8S_CNI_VERSION} kubelet=${KUBE_VERSION} kubeadm=${KUBE_VERSION} kubectl=${KUBE_VERSION}
+sudo apt-mark hold kubernetes-cni kubelet kubeadm kubectl
+
+#Add extra flags to Kubelet
+if ! grep -q -e 'fail-swap-on' /etc/default/kubelet; then
+ sudo sed 's/KUBELET_EXTRA_ARGS=/KUBELET_EXTRA_ARGS=--fail-swap-on=false --feature-gates HugePages=false/' -i /etc/default/kubelet
+fi
_conf='/etc/sysctl.d/99-akraino-iec.conf'
echo 'net.bridge.bridge-nf-call-iptables = 1' |& sudo tee "${_conf}"
--- /dev/null
+#!/bin/bash
+set -o xtrace
+set -e
+
+#install essential software
+source k8s_common.sh
+
+# install kubernetes
svcip=$(kubectl get services nginx -o json | grep clusterIP | cut -f4 -d'"')
sleep 10
-wget "http://$svcip"
+wget -O /dev/null "http://$svcip"
kubectl delete -f "${NGINX_APP}"
+rm -f "${NGINX_APP}"
kubectl get rc
kubectl get pods
kubectl get services
--- /dev/null
+#!/bin/bash
+set -o xtrace
+set -e
+
+CLUSTER_IP=${1:-172.16.1.136} # Align with the value in our K8s setup script
+POD_NETWORK_CIDR=${2:-192.168.0.0/16}
+
+# Install the Etcd Database
+if [ "$(uname -m)" == 'aarch64' ]; then
+ ETCD_YAML=etcd-arm64.yaml
+else
+ ETCD_YAML=etcd-amd64.yaml
+fi
+
+SCRIPTS_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+sed -i "s/10.96.232.136/${CLUSTER_IP}/" "${SCRIPTS_DIR}/cni/calico/${ETCD_YAML}"
+kubectl apply -f "${SCRIPTS_DIR}/cni/calico/${ETCD_YAML}"
+
+# Install the RBAC Roles required for Calico
+kubectl apply -f "${SCRIPTS_DIR}/cni/calico/rbac.yaml"
+
+# Install Calico to system
+sed -i "s@10.96.232.136@${CLUSTER_IP}@; s@192.168.0.0/16@${POD_NETWORK_CIDR}@" \
+ "${SCRIPTS_DIR}/cni/calico/calico.yaml"
+kubectl apply -f "${SCRIPTS_DIR}/cni/calico/calico.yaml"
+
+# Remove the taints on master node
+kubectl taint nodes --all node-role.kubernetes.io/master- || true
--- /dev/null
+#!/bin/bash
+#Install the k8s-master & k8s-worker node from Mgnt node
+#
+# Driven by the settings sourced from the sibling "config" file
+# (HOST_USER, K8S_MASTER_IP, K8S_MASTERPW, K8S_WORKER_GROUP, REPO_URL,
+# LOG_FILE).
+set -e
+
+#
+# Displays the help menu.
+#
+display_help () {
+ echo "Usage:"
+ echo " "
+ echo "This script can help you to deploy a simple iec testing"
+ echo "environments."
+ echo "Firstly, the master node and worker node information must"
+ echo "be added into config file which will be used for deployment."
+ echo ""
+ echo "Secondly, there should be an user on each node which will be"
+ echo "used to install the corresponding software on master and"
+ echo "worker nodes. At the same time, this user should be enable to"
+ echo "run the sudo command without input password on the hosts."
+ echo " "
+ echo "Example usages:"
+ echo " ./startup.sh"
+}
+
+
+
+#
+# Deploy k8s with calico.
+#
+# NOTE(review): node passwords are passed to sshpass on the command line
+# in plain text, so they are visible in the mgnt node's process list.
+deploy_k8s () {
+ set -o xtrace
+
+ INSTALL_SOFTWARE="sudo apt-get update && sudo apt-get install -y git &&\
+ sudo rm -rf ~/.kube ~/iec &&\
+ git clone ${REPO_URL} &&\
+ cd iec/src/foundation/scripts/ && source k8s_common.sh"
+
+ #Automatic deploy the K8s environments on Master node
+ SETUP_MASTER="cd iec/src/foundation/scripts/ && source k8s_master.sh ${K8S_MASTER_IP}"
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${INSTALL_SOFTWARE}
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_MASTER} | tee ${LOG_FILE}
+
+ # Recover the "kubeadm join ..." command printed during master setup so
+ # it can be appended to each worker's setup script below.
+ KUBEADM_JOIN_CMD=$(grep "kubeadm join " ./${LOG_FILE})
+
+
+ #Automatic deploy the K8s environments on each worker-node
+ SETUP_WORKER="cd iec/src/foundation/scripts/ && source k8s_worker.sh"
+
+ # Each entry is "<ip>,<password>" (see K8S_WORKER_GROUP in config).
+ for worker in "${K8S_WORKER_GROUP[@]}"
+ do
+ ip_addr="$(cut -d',' -f1 <<<${worker})"
+ passwd="$(cut -d',' -f2 <<<${worker})"
+ echo "Install & Deploy on ${ip_addr}. password:${passwd}"
+
+ sshpass -p ${passwd} ssh ${HOST_USER}@${ip_addr} ${INSTALL_SOFTWARE}
+ sshpass -p ${passwd} ssh ${HOST_USER}@${ip_addr} "echo \"sudo ${KUBEADM_JOIN_CMD}\" >> ./iec/src/foundation/scripts/k8s_worker.sh"
+ sshpass -p ${passwd} ssh ${HOST_USER}@${ip_addr} ${SETUP_WORKER}
+
+ done
+
+
+ #Deploy etcd & CNI from master node
+ #There may be more options in future. e.g: Calico, Contiv-vpp, Ovn-k8s ...
+ SETUP_CNI="cd iec/src/foundation/scripts && source setup-cni.sh"
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${SETUP_CNI}
+}
+
+#
+# Check the K8s environments
+# Runs the nginx smoke test (nginx.sh) on the master over SSH.
+#
+check_k8s_status(){
+ set -o xtrace
+
+ VERIFY_K8S="cd iec/src/foundation/scripts/ && source nginx.sh"
+ sshpass -p ${K8S_MASTERPW} ssh ${HOST_USER}@${K8S_MASTER_IP} ${VERIFY_K8S}
+
+ sleep 30
+}
+
+
+#
+# Init
+#
+if [ $1 == "--help" ] || [ $1 == "-h" ];
+then
+ display_help
+ exit 0
+fi
+
+
+# Read the configuration file
+source config
+
+echo "The number of K8s-Workers:${#K8S_WORKER_GROUP[@]}"
+
+rm -f "${LOG_FILE}"
+
+deploy_k8s
+
+check_k8s_status
--- /dev/null
+..
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain
+ a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ License for the specific language governing permissions and limitations
+ under the License.
+
+ Convention for heading levels in Integrated Edge Cloud documentation:
+
+ ======= Heading 0 (reserved for the title in a document)
+ ------- Heading 1
+ ~~~~~~~ Heading 2
+ +++++++ Heading 3
+ ''''''' Heading 4
+
+ Avoid deeper levels because they do not render well.
+
+
+====================
+SEBA on Arm Use Case
+====================
+
+.. _SEBA: https://wiki.opencord.org/display/CORD/SEBA
+
+SDN-Enabled Broadband Access (SEBA) is an Exemplar Platform being built by the ONF and CORD community,
+and is also a sample use case of IEC.
+
+We enable SEBA on Arm hardware using SEBA's native installation method.
+
+Sub-Folder Introduction
+-----------------------
+- install: installation scripts and files for SEBA-on-arm
+- src_repo: the source code repository that SEBA-on-arm uses
+- docker: Dockerfiles and related scripts to build and manage the docker images used in SEBA-on-arm
+- charts: Helm charts used in the SEBA-on-arm installation
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the alertmanager docker image
+set -o errexit
+set -o xtrace
+
+#cd $ALERTMANAGER_PATH
+
+#building alertmanager docker image
+make build
+make docker
+docker tag alertmanager:v0.15.0-arm64v8 iecedge/alertmanager_arm64:v0.15.0
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the filebeat-oss docker image
+set -o errexit
+set -o xtrace
+
+#cd $BEATS-DOCKER_PATH
+
+#building filebeat-oss docker image
+IMAGE_TAG=akrainoenea/filebeat ELASTIC_VERSION=6.4.2 IMAGE_FLAVORS=oss DEFAULT_IMAGE_FLAVOR=oss make filebeat
+docker tag docker.elastic.co/beats/filebeat-oss:6.4.2 iecedge/filebeat-oss_arm64:6.4.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the k8szk docker image
+set -o errexit
+set -o xtrace
+
+#cd $K8SZK_PATH
+
+#Build Docker Image
+cd statefulsets/zookeeper/
+docker build -t iecedge/k8szk_arm64:v3 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the cord-tester docker image
+set -o errexit
+set -o xtrace
+
+#cd $CORD-TESTER_PATH
+
+#building cord-tester docker image
+cd src/test/cord-api/
+docker build -f Dockerfile.k8s-api-tester -t iecedge/xos-api-tester_arm64:master .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the cp-kafka docker image
+
+#cd $CP-DOCKER-IMAGES_PATH
+
+set -o errexit
+set -o xtrace
+
+# Building the Images
+make build-debian
+docker tag confluentinc/cp-kafka:4.1.2 iecedge/cp-kafka_arm64:4.1.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the docker-curl docker image
+set -o errexit
+set -o xtrace
+
+#cd $DOCKER-CURL_PATH
+
+#building docker-curl docker image
+cd latest
+docker build -t iecedge/curl_arm64:latest .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the elasticsearch-docker docker image
+set -o errexit
+set -o xtrace
+
+#cd $ELASTICSEARCH-DOCKER_PATH
+
+#building elasticsearch-docker docker image
+make
+docker tag docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.2 iecedge/elasticsearch-oss_arm64:6.4.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the etcd-operator docker images
+set -o errexit
+set -o xtrace
+
+#cd $ETCD-OPERATOR-DOCKER_PATH
+
+#building etcd-operator docker images
+docker build -t iecedge/etcd-operator_arm64:v0.9.2 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the etcd docker image
+set -o errexit
+set -o xtrace
+
+#cd $ETCD_PATH
+
+#building etcd docker image
+./scripts/build-docker v3.3.10
+docker tag quay.io/coreos/etcd:v3.3.10-arm64 iecedge/etcd_arm64:v3.3.10
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the fabric-crossconnect docker image
+set -o errexit
+set -o xtrace
+
+#cd $FABRIC-CROSSCONNECT_PATH
+
+#building fabric-crossconnect docker image
+docker build -t iecedge/fabric-crossconnect-synchronizer_arm64:1.1.4 -f Dockerfile.synchronizer .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the fabric docker image
+set -o errexit
+set -o xtrace
+
+#cd $FABRIC_PATH
+
+#building fabric docker image
+docker build -t iecedge/fabric-synchronizer_arm64:2.1.6 -f Dockerfile.synchronizer .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the freeradius docker image
+set -o errexit
+set -o xtrace
+
+#cd $FREERADIUS_PATH
+
+#building freeradius docker image
+docker build -t iecedge/freeradius_arm64:2.2.8 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the jmx-prometheus-exporter docker image
+set -o errexit
+set -o xtrace
+
+#cd $JMX-PROMETHEUS-EXPORTER_PATH
+
+#Build Docker Image
+docker build -t iecedge/jmx-prometheus-exporter_arm64:master .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the k8s-sidecar docker image
+set -o errexit
+set -o xtrace
+
+#cd $K8S-SIDECAR_PATH
+
+#building k8s-sidecar docker image
+docker build -t iecedge/k8s-sidecar_arm64:0.0.3 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kafka-exporter binaries and docker image
+set -o errexit
+set -o xtrace
+
+#cd $KAFKA-EXPORTER_PATH
+
+#Build Binary
+make
+#Build Docker Image
+make docker
+docker tag kafka-exporter:master iecedge/kafka-exporter_arm64:master
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kafka-prometheus-jmx docker image
+set -o errexit
+set -o xtrace
+
+#cd $KAFKA-PROMETHEUS-JMX_PATH
+
+#Build Docker Image
+cd kafka
+docker build -t iecedge/kafka-prometheus-jmx-exporter_arm64:misc-dockerfiles .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kafka-topic-exporter docker image
+set -o errexit
+set -o xtrace
+
+#cd $KAFKA-TOPIC-EXPORTER_PATH
+
+#building kafka-topic-exporter docker image
+docker build -t iecedge/kafka-topic-exporter_arm64:1.1.2 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kibana-docker docker image
+set -o errexit
+set -o xtrace
+
+#cd $KIBANA-DOCKER_PATH
+
+#building kibana-docker docker image
+make
+docker tag docker.elastic.co/kibana/kibana-oss:6.4.2 iecedge/kibana-oss_arm64:6.4.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kube-state-metrics docker image
+set -o errexit
+set -o xtrace
+
+#cd $KUBE-STATE-METRICS_PATH
+
+#building kube-state-metrics docker image
+make container
+docker tag quay.io/coreos/kube-state-metrics-arm64:v1.3.1 iecedge/kube-state-metrics_arm64:v1.3.1
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the kubernetes-synchronizer docker image
+set -o errexit
+set -o xtrace
+
+#cd $KUBERNETES-SYNCHRONIZER _PATH
+
+#building kubernetes-synchronizer docker image
+docker build -t iecedge/kubernetes-synchronizer_arm64:1.1.3 -f Dockerfile.synchronizer .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the fluentd-elasticsearch docker image
+set -o errexit
+set -o xtrace
+
+#cd $FLUENTD-ELASTICSEARCH_PATH
+
+#building fluentd-elasticsearch docker image
+cd ./cluster/addons/fluentd-elasticsearch/fluentd-es-image
+make build
+docker tag akrainoenea/fluentd-elasticsearch:v2.3.1 iecedge/fluentd-elasticsearch_arm64:v2.3.1
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the logstash-docker docker image
+set -o errexit
+set -o xtrace
+
+#cd $LOGSTASH-DOCKER_PATH
+
+#building logstash-docker docker image
+make
+docker tag docker.elastic.co/logstash/logstash-oss:6.4.2 iecedge/logstash-oss_arm64:6.4.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the logstash_exporter docker image
+set -o errexit
+set -o xtrace
+
+#cd $LOGSTASH-EXPORTER_PATH
+
+#building logstash_exporter docker image
+# NOTE(review): the image is tagged "logstash_explorer_arm64" although the
+# project is logstash_exporter — looks like a typo, but any rename must be
+# coordinated with whatever chart/manifest pulls this tag.
+docker build -t iecedge/logstash_explorer_arm64:v0.1.2 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the node_exporter docker image
+set -o errexit
+set -o xtrace
+
+#cd $NODE_EXPORTER_PATH
+
+#building node_exporter docker image
+make build
+make docker
+docker tag node-exporter:release-0.16-arm64v8 iecedge/node-exporter_arm64:v0.16.0
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the olt-service docker image
+set -o errexit
+set -o xtrace
+
+#cd $OLT-SERVICE_PATH
+
+#building olt-service docker image
+docker build -t iecedge/volt-synchronizer_arm64:2.1.14 -f Dockerfile.synchronizer .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the onos-service docker image
+set -o errexit
+set -o xtrace
+
+#cd $ONOS-SERVICE_PATH
+
+#building onos-service docker image
+docker build -f Dockerfile.synchronizer -t iecedge/onos-synchronizer_arm64:2.0.7 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the onos docker image
+set -o errexit
+set -o xtrace
+
+#cd $ONOS_PATH
+
+#building onos docker image
+docker build -t iecedge/onos_arm64:v1.13.5 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the prometheus docker image
+set -o errexit
+set -o xtrace
+
+#cd $PROMETHEUS_PATH
+
+#building prometheus docker image
+make build
+make docker
+docker tag prometheus:v2.3.1-arm64v8 iecedge/prometheus_arm64:v2.3.1
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the pushgateway docker image
+set -o errexit
+set -o xtrace
+
+#cd $PUSHGATEWAY_PATH
+
+#building pushgateway docker image
+make build
+make docker
+docker tag pushgateway:v0.5.2-arm64v8 iecedge/pushgateway_arm64:v0.5.2
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the rcord-synchronizer docker image
+set -o errexit
+set -o xtrace
+
+#cd $RCORD-SYNCHRONIZER _PATH
+
+#building rcord-synchronizer docker image
+docker build -t iecedge/rcord-synchronizer_arm64:1.0.8 -f Dockerfile.synchronizer .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the sadis-server docker image
+set -o errexit
+set -o xtrace
+
+#cd $SADIS-SERVER_PATH
+
+#building sadis-server docker image
+docker build -t iecedge/sadis-server_arm64:1.0.1 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the tosca-loader docker image
+set -o errexit
+set -o xtrace
+
+#cd $TOSCA-LOADER_PATH
+
+#building tosca-loader docker image
+cd loader/
+docker build -f Dockerfile.tosca-loader -t iecedge/tosca-loader_arm64:1.1.5 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the voltha docker images
+set -o errexit
+set -o xtrace
+
+#cd $VOLTHA_PATH
+
+#building voltha docker images
+VOLTHA_BUILD=docker make seba
+docker tag voltha-voltha:latest iecedge/voltha-voltha_arm64:1.6.0
+docker tag voltha-cli:latest iecedge/voltha-cli_arm64:1.6.0
+docker tag voltha-ofagent:latest iecedge/voltha-ofagent_arm64:1.6.0
+docker tag voltha-netconf:latest iecedge/voltha-netconf_arm64:1.6.0
+docker tag voltha-envoy:latest iecedge/voltha-envoy_arm64:1.6.0
+docker tag voltha-alarm-generator:latest iecedge/voltha-alarm-generator_arm64:1.6.0
+docker tag voltha-ponsim:latest iecedge/voltha-ponsim_arm64:1.6.0
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the xos-gui docker image
+set -o errexit
+set -o xtrace
+
+#cd $XOS-GUI_PATH
+
+#building xos-gui docker image
+docker build -f Dockerfile -t iecedge/xos-gui_arm64:master .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the xos-tosca docker image
+set -o errexit
+set -o xtrace
+
+#cd $XOS-TOSCA_PATH
+
+#building xos-tosca docker image
+make build
+docker tag xosproject/xos-tosca:latest iecedge/xos-tosca_arm64:1.1.6
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the xos-ws docker image
+set -o errexit
+set -o xtrace
+
+#cd $XOS-WS_PATH
+
+#building xos-ws docker image
+docker build -t iecedge/xos-ws_arm64:master .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the xos docker image
+set -o errexit
+set -o xtrace
+
+#cd $XOS_PATH
+
+#building xos-core docker image
+# NOTE(review): xos-core is built without an explicit tag (docker defaults
+# to :latest), unlike the chameleon image below which is pinned to 2.1.25 —
+# confirm whether consumers expect iecedge/xos-core_arm64:latest.
+docker build -f ./containers/xos/Dockerfile.xos-core -t iecedge/xos-core_arm64 .
+
+#building Chameleon for XOS
+cd containers/chameleon
+make
+docker build -f Dockerfile.chameleon -t iecedge/chameleon_arm64:2.1.25 .
--- /dev/null
+#!/usr/bin/env bash
+
+# This script builds the zookeeper_exporter docker image
+set -o errexit
+set -o xtrace
+
+#cd $ZOOKEEPER_EXPORTER_PATH
+
+#Build Docker Image
+docker build -t iecedge/zookeeper-exporter_arm64:master .
--- /dev/null
+#!/bin/bash -ex
+# shellcheck disable=SC2016
+
+#Modified from https://github.com/cachengo/seba_charts/blob/master/scripts/installSEBA.sh
+
+# Directory containing this script. Quoted $(...) form is safe for paths
+# containing whitespace; the original unquoted backtick form was not.
+basepath="$(cd "$(dirname "$0")" && pwd)"
+# All of the following may be overridden from the environment.
+CORD_REPO=${CORD_REPO:-https://charts.opencord.org}
+CORD_PLATFORM_VERSION=${CORD_PLATFORM_VERSION:-6.1.0}
+SEBA_VERSION=${SEBA_VERSION:-1.0.0}
+ATT_WORKFLOW_VERSION=${ATT_WORKFLOW_VERSION:-1.0.2}
+
+# Local chart checkout used on arm64; replaced by the "cord" repo alias on x86.
+CORD_CHART=${CORD_CHART:-${basepath}/../src_repo/seba_charts}
+
+# TODO(alav): Make each step re-entrant
+
+function wait_for {
+ # Execute in a subshell to prevent local variable override during recursion
+ (
+ local total_attempts=$1; shift
+ local cmdstr=$*
+ local sleep_time=2
+ echo -e "\n[wait_for] Waiting for cmd to return success: ${cmdstr}"
+ # shellcheck disable=SC2034
+ for attempt in $(seq "${total_attempts}"); do
+ echo "[wait_for] Attempt ${attempt}/${total_attempts%.*} for: ${cmdstr}"
+ # shellcheck disable=SC2015
+ eval "${cmdstr}" && echo "[wait_for] OK: ${cmdstr}" && return 0 || true
+ sleep "${sleep_time}"
+ done
+ echo "[wait_for] ERROR: Failed after max attempts: ${cmdstr}"
+ return 1
+ )
+}
+
+# Wait for exactly one running tiller pod before using helm.
+wait_for 10 'test $(kubectl get pods --all-namespaces | grep -ce "tiller.*Running") -eq 1'
+
+# Add the CORD repository and update indexes
+
+if [ "$(uname -m)" == "aarch64" ]; then
+  # On arm64 use the local seba_charts submodule instead of the upstream repo.
+  if [ ! -d "${CORD_CHART}/cord-platform" ]; then
+    #git clone https://github.com/iecedge/seba_charts ${CORD_CHART}
+    cd "${basepath}/../src_repo" && git submodule update seba_charts
+  fi
+else
+  helm repo add cord "${CORD_REPO}"
+  helm repo update
+  CORD_CHART=cord
+fi
+
+
+# Install the CORD platform
+helm install -n cord-platform "${CORD_CHART}/cord-platform" --version="${CORD_PLATFORM_VERSION}"
+# Wait until 3 etcd CRDs are present in Kubernetes
+wait_for 300 'test $(kubectl get crd | grep -ice etcd) -eq 3'
+
+# Install the SEBA profile
+# NOTE(review): the \2 backreference in an ERE is a GNU grep extension —
+# this readiness check assumes GNU grep on the host; confirm for other greps.
+helm install -n seba --version "${SEBA_VERSION}" "${CORD_CHART}/seba"
+wait_for 1500 'test $(kubectl get pods | grep -vcE "(\s(.+)/\2.*Running|tosca-loader.*Completed)") -eq 1'
+
+# Install the AT&T workflow
+helm install -n att-workflow --version "${ATT_WORKFLOW_VERSION}" "${CORD_CHART}/att-workflow"
+wait_for 300 'test $(kubectl get pods | grep -vcE "(\s(.+)/\2.*Running|tosca-loader.*Completed)") -eq 1'
--- /dev/null
+From f13d46ea72f5e0d9e8a43a10b19b9914ece25d02 Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Mon, 15 Apr 2019 21:29:50 +0800
+Subject: [PATCH] Correct path errors and tar command errors
+
+Correct path errors and tar command errors
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ templates/Dockerfile.j2 | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/templates/Dockerfile.j2 b/templates/Dockerfile.j2
+index 9cdcb4a..31ed20b 100644
+--- a/templates/Dockerfile.j2
++++ b/templates/Dockerfile.j2
+@@ -24,8 +24,8 @@ FROM centos:7
+
+ RUN yum update -y && yum install -y curl && yum clean all
+
+-COPY --from=golang /go/src/github.com/alexandruavadanii/beats/{{ beat }}/build/distributions/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz /tmp
+-RUN tar zxf /tmp/FIXME -C /tmp && \
++COPY --from=golang /go/src/github.com/elastic/beats/{{ beat }}/build/distributions/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz /tmp
++RUN tar zxf /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz -C /tmp && \
+ mv /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64 {{ beat_home }} && \
+ rm /tmp/{{ beat }}-{{ elastic_version }}-linux-arm64.tar.gz
+
+--
+2.17.1
+
--- /dev/null
+From 0d93a605f2a8dc34ce4040e5f34ae68d636fce8b Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Wed, 27 Mar 2019 15:25:58 +0800
+Subject: [PATCH] modify Makefile for aarch64 support
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index e2ba788..d44171c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -55,7 +55,7 @@ release: promu github-release
+
+ promu:
+ @GOOS=$(shell uname -s | tr A-Z a-z) \
+- GOARCH=$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m))) \
++ GOARCH=$(subst aarch64,arm64,$(subst x86_64,amd64,$(patsubst i%86,386,$(shell uname -m)))) \
+ $(GO) get -u github.com/prometheus/promu
+
+ github-release:
+--
+2.17.1
+
--- /dev/null
+From 6be92d42b8239bbeb69882a4992138c075a47b00 Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Thu, 28 Mar 2019 16:52:48 +0800
+Subject: [PATCH] Modify the URL to properly download kafka bin
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ kafka/Dockerfile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kafka/Dockerfile b/kafka/Dockerfile
+index 4d43b54..19782bb 100644
+--- a/kafka/Dockerfile
++++ b/kafka/Dockerfile
+@@ -5,7 +5,7 @@ ENV kafka_bin_version=2.12-$kafka_version
+
+ RUN apk add --no-cache --update-cache --virtual build-dependencies curl ca-certificates \
+ && mkdir -p /opt/kafka \
+- && curl -SLs "https://www-eu.apache.org/dist/kafka/$kafka_version/kafka_$kafka_bin_version.tgz" | tar -xzf - --strip-components=1 -C /opt/kafka \
++ && curl -SLs "https://archive.apache.org/dist/kafka/$kafka_version/kafka_$kafka_bin_version.tgz" |tar -xzf - --strip-components=1 -C /opt/kafka \
+ && apk del build-dependencies \
+ && rm -rf /var/cache/apk/*
+
+--
+2.17.1
+
--- /dev/null
+From 4485e25f152b18be11cbdc00287a9ab2146db50c Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Wed, 3 Apr 2019 17:03:56 +0800
+Subject: [PATCH] Modify rcord Dockerfile for support Arm64
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ Dockerfile.synchronizer | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Dockerfile.synchronizer b/Dockerfile.synchronizer
+index c84eab6..9509770 100644
+--- a/Dockerfile.synchronizer
++++ b/Dockerfile.synchronizer
+@@ -16,7 +16,7 @@
+ # docker build -t xosproject/rcord-synchronizer:candidate -f Dockerfile.synchronizer .
+
+ # xosproject/rcord-synchronizer
+-FROM xosproject/xos-synchronizer-base:2.1.25
++FROM cachengo/xos-synchronizer-base:2.1.38
+
+ COPY xos/synchronizer /opt/xos/synchronizers/rcord
+ COPY VERSION /opt/xos/synchronizers/rcord/
+--
+2.17.1
+
--- /dev/null
+From 9108849f974a6513027b2cb80f64e5ee0f22a9fe Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Wed, 10 Apr 2019 14:23:31 +0800
+Subject: [PATCH] pin pynacl to 1.1.2 recent version fail to install
+
+Pin pynacl to 1.1.2; recent versions of pynacl (specifically 1.2.0, 1.2.1, and 1.3.0) fail to install.
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ requirements.txt | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/requirements.txt b/requirements.txt
+index 5f2cfd2..004a069 100755
+--- a/requirements.txt
++++ b/requirements.txt
+@@ -63,6 +63,7 @@ etcd3==0.7.0
+ pyparsing==2.2.0
+ packaging==17.1
+ pexpect==4.6.0
++pynacl==1.1.2
+
+ # python-consul>=0.6.1 we need the pre-released version for now, because 0.6.1 does not
+ # yet support Twisted. Once this is released, it will be the 0.6.2 version
+--
+2.17.1
+
--- /dev/null
+From bc98df909b9551f62fc5777c79f3903fee704bf6 Mon Sep 17 00:00:00 2001
+From: jialv01 <Jianlin.Lv@arm.com>
+Date: Thu, 11 Apr 2019 13:58:56 +0800
+Subject: [PATCH 2/2] Support ponsim arm64 docker image build
+
+Support ponsim arm64 docker image build
+
+Signed-off-by: jialv01 <Jianlin.Lv@arm.com>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index d015bb2..c6ceded 100644
+--- a/Makefile
++++ b/Makefile
+@@ -237,7 +237,7 @@ jenkins-containers: base voltha ofagent netconf consul cli envoy fluentd unum j2
+
+ prod-containers: base voltha ofagent netconf shovel onos dashd cli grafana consul tools envoy fluentd unum j2
+
+-seba-containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer envoy alarm-generator test_runner
++seba-containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer envoy alarm-generator ponsim test_runner
+
+ containers: base voltha ofagent netconf shovel onos tester config-push dashd cli portainer grafana nginx consul tools envoy fluentd unum ponsim j2 alarm-generator test_runner
+
+--
+2.17.1
+
--- /dev/null
+Subproject commit bdd8c66b202e92aba9c1e5acba4587cf4d639101
--- /dev/null
+Subproject commit 3ff5db2897a1254f1131fefe14eaa60049a8c0c3
--- /dev/null
+Subproject commit b72fa5059d8ce9de234b65c392dc03bca8a73be4
--- /dev/null
+Subproject commit cbe174c8b830f472731f5b729a32ee08d4555995
--- /dev/null
+Subproject commit 9b9973b6cdf6eca5aaf86e16b19ef688b3db81a5
--- /dev/null
+Subproject commit af2cc9f44f7a62666fa667d0c5664e26664fa5d7
--- /dev/null
+Subproject commit 540153ae8cb482b3769fd3ba2126bfd70df884d9
--- /dev/null
+Subproject commit 27fc7e2296f506182f58ce846e48f36b34fe6842
--- /dev/null
+Subproject commit fc5c9e7e20eef0d29fb706f651ae3096359039a9
--- /dev/null
+Subproject commit dc1d411dca3df3259f4aef9411675e8695f21704
--- /dev/null
+Subproject commit 3d6a98a510d18b5683e4be4b96a099a572f5b0c9
--- /dev/null
+Subproject commit 017861f7ac6191e501cff9bd01900d0ffe622916
--- /dev/null
+Subproject commit 7996bf137b07d019bb370acb6300f244d6fde3f5
--- /dev/null
+Subproject commit dd78ae923f06ff36a318d837d55fc09e488af577
--- /dev/null
+Subproject commit 56c1d47f9794d59d042852cc31558257e24d2ea0
--- /dev/null
+Subproject commit a595dcf74a0d7161a9c6bdf8858090bfc2411642
--- /dev/null
+Subproject commit 06502b26f88c467b08ad2614e89fa3f89015528f
--- /dev/null
+Subproject commit ea21f2e2ce4464e37da9fdc3158f4c44e5ff75ec
--- /dev/null
+Subproject commit 43fcd3cf48f9ab6d0361210c5debb4e2aa1d2fc0
--- /dev/null
+# amd64 kubectl + gettext deployer image, used in CI deployment steps.
+FROM alpine:3.6
+
+# Fixed doubled domain in the maintainer address ("...@gmail.com@gmail.com").
+LABEL MAINTAINER="Sergii Nuzhdin <ipaq.lw@gmail.com>"
+
+# kubectl release to download (linux/amd64 binary).
+ENV KUBE_LATEST_VERSION="v1.13.0"
+
+# Install CA certificates and gettext, fetch the kubectl binary, then
+# remove the build-only curl dependency and the apk cache.
+RUN apk add --update ca-certificates \
+ && apk add --update -t deps curl \
+ && apk add --update gettext \
+ && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl \
+ && chmod +x /usr/local/bin/kubectl \
+ && apk del --purge deps \
+ && rm /var/cache/apk/*
--- /dev/null
+# arm64 (aarch64) variant of the kubectl deployer image.
+FROM alpine:3.6
+
+# kubectl release to download (linux/arm64 binary).
+ENV KUBE_LATEST_VERSION="v1.13.0"
+
+# Install CA certificates and gettext, fetch the arm64 kubectl binary, then
+# remove the build-only curl dependency and the apk cache to keep the image small.
+RUN apk add --update ca-certificates \
+ && apk add --update -t deps curl \
+ && apk add --update gettext \
+ && curl -L https://storage.googleapis.com/kubernetes-release/release/${KUBE_LATEST_VERSION}/bin/linux/arm64/kubectl -o /usr/local/bin/kubectl \
+ && chmod +x /usr/local/bin/kubectl \
+ && apk del --purge deps \
+ && rm /var/cache/apk/*
--- /dev/null
+Forked from https://github.com/lwolf/kubectl-deployer-docker.git
+Added Dockerfile.arm64 for aarch64 support.
--- /dev/null
+# kubectl-deployer-docker
+
+Alpine based image with kubectl and gettext used in gitlab-ci build steps.
+
+http://blog.lwolf.org/post/continuous-deployment-to-kubernetes-from-gitlab-ci/
+
+docker image location: https://hub.docker.com/r/lwolf/kubectl_deployer/
+
+| Tag | Alpine | Kubectl |
+| :----: | :----: | :-----: |
+| latest | 3.6 | 1.13.0 |
+| 1.13.0 | 3.6 | 1.13.0 |
+| 1.10.0 | 3.6 | 1.10.0 |
+| 1.8.9 | 3.6 | 1.8.9 |
+| 0.4 | 3.6 | 1.6.4 |
+| 0.3 | 3.4 | 1.6.4 |
+| 0.2 | 3.4 | 1.5.2 |
--- /dev/null
+Subproject commit 9e73a9a2804332e7d2560372f18395e6fc0c58c0
--- /dev/null
+Subproject commit ae8734f79fa99cde901f73f849a90e78a3943530
--- /dev/null
+Subproject commit 9448cf4572b1d43f4f3beaffbdd8b6ed77cbbb19
--- /dev/null
+Subproject commit 302e9e86b2aa99e1239a5075ea27b89b69480d37
--- /dev/null
+Subproject commit b8236a6fdc10ada61777f1b45cc7f7d211f95e90
--- /dev/null
+Subproject commit 27a51dd3c60fabcf18faf94bb5f96109f3d24167
--- /dev/null
+Subproject commit 187e13d77773ea76f146458dd4702f0ea86c9cd3
--- /dev/null
+Subproject commit 31cd6cdd22cbc832a23d040560e2846630b09a0d
--- /dev/null
+Subproject commit a08732a8629fbf80d730f180f3dec977adb07b9f
--- /dev/null
+Subproject commit 01b565e0e02f58eb1772a5c41c42ce6769aa2553
--- /dev/null
+Subproject commit 4ed1df6a0fac69ec5a947a426ea07face235c43f
--- /dev/null
+Subproject commit 988c04a47d16a7ca4995722042759b37898c6684
--- /dev/null
+Subproject commit c38779cc6479dee71e0a178c8401c932798450e5
--- /dev/null
+Subproject commit 45be57abaa422d7fbd93e79ec5e43b80838834b2
--- /dev/null
+Subproject commit 4c9bf24d5a345d827bd60fd1a691740a3da9a2c9
--- /dev/null
+Subproject commit c22305da34a0aef9e89f7f636b4ae06beed5e85c
--- /dev/null
+Subproject commit 292dc884f1d94e34404a34967cf2c180219444eb
--- /dev/null
+Subproject commit ea0d963c70431dad1fe6b8149d99d011b4b72bbf
--- /dev/null
+Subproject commit 29e6c7ce992e8395ce024adbce2fe89ebb3f3487
--- /dev/null
+Subproject commit 972e0da376355b6ecfbfb6266aa730346baca974
--- /dev/null
+##############################################################################
+# Copyright (c) 2019 Linux Foundation and others.
+# All rights reserved. This program and the accompanying materials
+# are made available under the terms of the Apache License, Version 2.0
+# which accompanies this distribution, and is available at
+# http://www.apache.org/licenses/LICENSE-2.0
+##############################################################################
+# tox configuration: documentation build environments only.
+[tox]
+minversion = 1.6
+# Environments run by a bare "tox": HTML docs build plus external link check.
+envlist =
+ docs,
+ docs-linkcheck
+# No Python package is built or installed; this tox file only drives docs.
+skipsdist = true
+
+# Build the Sphinx HTML documentation into docs/_build/html.
+[testenv:docs]
+deps = -rdocs/requirements.txt
+commands =
+ sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html
+ echo "Generated docs available in {toxinidir}/docs/_build/html"
+# echo is an external (non-virtualenv) command; whitelist it so tox allows it.
+whitelist_externals = echo
+
+# Verify outbound links in the docs with Sphinx's linkcheck builder.
+[testenv:docs-linkcheck]
+deps = -rdocs/requirements.txt
+commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck