robot tcs, test charts, robot container added
author     Endre Nemeth <endre.nemeth@nokia.com>
           Tue, 15 Oct 2019 14:21:57 +0000 (16:21 +0200)
committer  Endre Nemeth <endre.nemeth@nokia.com>
           Wed, 8 Jan 2020 16:09:19 +0000 (17:09 +0100)
Change-Id: I62fc3e2921f1dbbee5338bf22e15d322b5d49ccc
Signed-off-by: Endre Nemeth <endre.nemeth@nokia.com>
345 files changed:
.gitignore
.gitreview [new file with mode: 0644]
.pylintrc
libraries/common/common_utils.py [new file with mode: 0644]
libraries/common/decorators_for_robot_functionalities.py [new file with mode: 0644]
libraries/common/execute_command.py [new file with mode: 0644]
libraries/common/stack_infos.py [new file with mode: 0644]
libraries/common/test_constants.py [new file with mode: 0644]
libraries/common/users.py [new file with mode: 0644]
requirements-minimal.txt
requirements.txt
resources/robot_container/Dockerfile [new file with mode: 0644]
resources/robot_container/README.rst [new file with mode: 0644]
resources/robot_container/robot-deployment-test.sh [new file with mode: 0755]
resources/scripts/build-test-containers.sh [new file with mode: 0755]
resources/scripts/include/crf-registry [new file with mode: 0755]
resources/scripts/include/robot_container.env [new file with mode: 0644]
resources/scripts/prepare_robot_bm.py [new file with mode: 0755]
resources/scripts/robot-test-build.sh [new file with mode: 0755]
resources/scripts/robot-test-run.sh [new file with mode: 0755]
resources/test_charts/busybox3/Chart.yaml [new file with mode: 0644]
resources/test_charts/busybox3/templates/busybox.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/Chart.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_01.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_01.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_02.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_03.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_01.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_02.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_05.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_06.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_07.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_08.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_09.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_10.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_11.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_12.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_01.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_02.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_03.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_04.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_05.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_06.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_07.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_08.yaml [new file with mode: 0644]
resources/test_charts/clusternetwork-test/templates/cnet_09.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation1/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation1/templates/cpupooling-deployment9.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation1/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation2/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation2/templates/cpupooling-deployment10.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation2/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation3/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation3/templates/cpupooling-deployment11.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-annotation3/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default1/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default1/templates/cpupooling-deployment7.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default1/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default2/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default2/templates/cpupooling-deployment8.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-default2/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive1/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive1/templates/cpupooling-deployment1.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive1/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive2/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive2/templates/cpupooling-deployment2.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive2/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive3/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive3/templates/cpupooling-deployment3.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-exclusive3/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix1/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix1/templates/cpupooling-deployment5.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix1/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix2/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix2/templates/cpupooling-deployment6.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-mix2/values.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-shared1/Chart.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-shared1/templates/cpupooling-deployment4.yaml [new file with mode: 0644]
resources/test_charts/cpu-pooling-shared1/values.yaml [new file with mode: 0644]
resources/test_charts/custom-metrics/Chart.yaml [new file with mode: 0644]
resources/test_charts/custom-metrics/templates/podinfo-dep.yaml [new file with mode: 0644]
resources/test_charts/custom-metrics/templates/podinfo-hpa-custom.yaml [new file with mode: 0644]
resources/test_charts/custom-metrics/templates/podinfo-svc.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods1/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods1/templates/danmnet-pods1-1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods1/templates/danmnet-pods1-2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods1/templates/danmnet-pods1-3.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods1/templates/danmnet-pods1-4.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods10/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods10/templates/danmnet-pods10.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods11/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods11/templates/danmnet-pods11.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods12/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods12/templates/danmnet-pods12.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods13/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods13/templates/danmnet-pods13.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods14/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods14/templates/danmnet-pods14.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods2/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods2/templates/danmnet-pods2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods3/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods3/templates/danmnet-pods3-1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods3/templates/danmnet-pods3-2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods3/templates/danmnet-pods3-3.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods3/templates/danmnet-pods3-4.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods4/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods4/templates/danmnet-pods4.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods5/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods5/templates/danmnet-pods5.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods6/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods6/templates/danmnet-pods6-1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods6/templates/danmnet-pods6-2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods7/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods7/templates/danmnet-pods7_1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods8/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1_service.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods9/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-pods9/templates/danmnet-pods9_1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net11.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net13.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net15.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net16.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net20.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net21.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net23.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net24.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net25.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net26.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net28.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net30.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net5.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net7.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net8.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/d_test-net9.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/ks_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/ks_test-net27.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-2/templates/ks_test-net29.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net11.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net13.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net15.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net16.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net20.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net21.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net23.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net24.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net25.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net26.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net28.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net30.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net5.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net7.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net8.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/d_test-net9.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/ks_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/ks_test-net27.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-3/templates/ks_test-net29.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net10.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net11.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net12.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net14.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net17.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net18.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net19.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net22.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net3.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test-error/templates/d_test-net9.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net1.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net13.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net15.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net16.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net20.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net21.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net23.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net24.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net25.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net26.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net28.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net30.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net4.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net5.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net6.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net7.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/d_test-net8.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/ks_test-net2.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/ks_test-net27.yaml [new file with mode: 0644]
resources/test_charts/danmnet-test/templates/ks_test-net29.yaml [new file with mode: 0644]
resources/test_charts/http-traffic-gen/Chart.yaml [new file with mode: 0644]
resources/test_charts/http-traffic-gen/templates/http-traffic-gen-dep.yaml [new file with mode: 0644]
resources/test_charts/load-generator-for-apache/Chart.yaml [new file with mode: 0644]
resources/test_charts/load-generator-for-apache/templates/load-generator.yml [new file with mode: 0644]
resources/test_charts/logger/Chart.yaml [new file with mode: 0644]
resources/test_charts/logger/templates/logger.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod1.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod2.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod3.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod4.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod5.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod6.yaml [new file with mode: 0644]
resources/test_charts/network-attach-test/templates/cnet_pod7.yaml [new file with mode: 0644]
resources/test_charts/persistentvolume-claim/Chart.yaml [new file with mode: 0644]
resources/test_charts/persistentvolume-claim/templates/persistentvolume-claim.yml [new file with mode: 0644]
resources/test_charts/php-apache/Chart.yaml [new file with mode: 0644]
resources/test_charts/php-apache/templates/flannel.yaml [new file with mode: 0644]
resources/test_charts/php-apache/templates/php-apache-deployment.yml [new file with mode: 0644]
resources/test_charts/php-apache/templates/php-apache-hpa.yml [new file with mode: 0644]
resources/test_charts/php-apache/templates/php-apache-service.yml [new file with mode: 0644]
resources/test_charts/storage-test-oam/Chart.yaml [new file with mode: 0644]
resources/test_charts/storage-test-oam/templates/pv-test-deployment.yml [new file with mode: 0644]
resources/test_charts/storage-test-worker/Chart.yaml [new file with mode: 0644]
resources/test_charts/storage-test-worker/templates/pv-test-deployment.yml [new file with mode: 0644]
resources/test_charts/su-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/su-test/templates/su-test.yaml [new file with mode: 0644]
resources/test_charts/su-test/values.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_01.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_02.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_03.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_04.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_05.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_06.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_07.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_08.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_09.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_01.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_02.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_03.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_04.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_05.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_06.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_07.yaml [new file with mode: 0644]
resources/test_charts/tenantconfig-test/templates/tconf_08.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod1/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_03.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_04.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod10/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod10/templates/tennet_pod_10.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod11/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod11/templates/tennet_pod_11.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod12/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod12/templates/tennet_pod_12.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod13/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod13/templates/tennet_pod_13.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod14/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod14/templates/tennet_pod_14.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod2/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod2/templates/tennet_pod_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod3/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_03.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_04.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod4/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod4/templates/tennet_pod_04.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod5/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod5/templates/tennet_pod_05.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod6/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod7/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod7/templates/tennet_pod_07.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod8/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08_service.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod9/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_service.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_03.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_04.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_05.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_06.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_07.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_05.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_06.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_08.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_09.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_10.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_11.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/Chart.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_01.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_02.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_03.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_04.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_05.yaml [new file with mode: 0644]
resources/test_charts/tenantnetwork-test/templates/tennet_06.yaml [new file with mode: 0644]
resources/test_containers/alpine_test/Dockerfile [new file with mode: 0644]
resources/test_containers/busybox/Dockerfile [new file with mode: 0644]
resources/test_containers/http-traffic-gen/Dockerfile [new file with mode: 0644]
resources/test_containers/http-traffic-gen/http_traffic_gen.py [new file with mode: 0644]
resources/test_containers/logger/Dockerfile [new file with mode: 0644]
resources/test_containers/logger/textgen.py [new file with mode: 0644]
resources/test_containers/php-apache/Dockerfile [new file with mode: 0644]
resources/test_containers/php-apache/index.php [new file with mode: 0644]
resources/test_containers/podinfo/Dockerfile [new file with mode: 0644]
testcases/HPA_check/Custom_HPA_check.py [new file with mode: 0644]
testcases/HPA_check/HPA_check.py [new file with mode: 0644]
testcases/basic_func_tests/tc_002_pod_health_check.py [new file with mode: 0644]
testcases/basic_func_tests/tc_003_test_registry.py [new file with mode: 0644]
testcases/basic_func_tests/tc_004_ssh_file_check.py [new file with mode: 0644]
testcases/basic_func_tests/tc_005_ssh_dns_server_check.py [new file with mode: 0644]
testcases/basic_func_tests/tc_006_ssh_test_ext_ntp.py [new file with mode: 0644]
testcases/basic_func_tests/tc_007_ssh_test_overlay_quota.py [new file with mode: 0644]
testcases/basic_func_tests/tc_008_storage_check.py [new file with mode: 0644]
testcases/cpu_pooling/tc_001_cpu_pool_validation_tests.py [new file with mode: 0644]
testcases/cpu_pooling/tc_002_exclusive_pool_tests.py [new file with mode: 0644]
testcases/cpu_pooling/tc_003_exclusive_pool_tests_more_cpu.py [new file with mode: 0644]
testcases/cpu_pooling/tc_004_shared_cpu_pool_tests.py [new file with mode: 0644]
testcases/danm_network_check/danm_utils.py [new file with mode: 0644]
testcases/danm_network_check/tc_001_danmnet_object_check.py [new file with mode: 0644]
testcases/danm_network_check/tc_002_tenantnetwork_pod_check.py [new file with mode: 0644]
testcases/danm_network_check/tc_003_clusternetwork_pod_check.py [new file with mode: 0644]
testcases/fluentd/tc_001_ssh_test_fluentd_logging.py [new file with mode: 0644]
testcases/fluentd/tc_002_elasticsearch_storage_check.py [new file with mode: 0644]
testcases/parallel_suites/cpu-pooling.robot [new file with mode: 0644]
testcases/parallel_suites/danm_network_check.robot [new file with mode: 0644]
testcases/parallel_suites/elasticity_test.robot [new file with mode: 0644]
testcases/parallel_suites/ssh_check.robot [new file with mode: 0644]
tox.ini

diff --git a/.gitignore b/.gitignore
index 07ba6ee..caf6bbf 100644 (file)
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 *.swn
 *.tox/
 *.pyc
+*.log
 log.html
 output.xml
 report.html
@@ -14,3 +15,5 @@ libraries/cloudtaflibs.egg-info/
 coverage-html-py27/
 rfcli_output/
 targets/*.ini
+.idea
+pabot_logs/
diff --git a/.gitreview b/.gitreview
new file mode 100644 (file)
index 0000000..4da4b41
--- /dev/null
@@ -0,0 +1,5 @@
+[gerrit]
+host=gerrit.akraino.org
+port=29418
+project=ta/cloudtaf
+defaultremote=origin
diff --git a/.pylintrc b/.pylintrc
index fe5d705..174740a 100644 (file)
--- a/.pylintrc
+++ b/.pylintrc
@@ -65,7 +65,7 @@ confidence=
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable= missing-docstring, locally-disabled
+disable= missing-docstring, locally-disabled, unused-import, wildcard-import, unused-wildcard-import, dangerous-default-value, invalid-name, duplicate-code, too-many-arguments, wrong-import-order, wrong-import-position, broad-except, fixme, unexpected-keyword-arg, redefined-builtin, global-variable-undefined, anomalous-backslash-in-string, global-statement
 
 [REPORTS]
 
@@ -192,7 +192,7 @@ max-nested-blocks=5
 [FORMAT]
 
 # Maximum number of characters on a single line.
-max-line-length=100
+max-line-length=120
 
 # Regexp for a line that is allowed to be longer than the limit.
 ignore-long-lines=^\s*(# )?<?https?://\S+>?$
 
diff --git a/libraries/common/common_utils.py b/libraries/common/common_utils.py
new file mode 100644 (file)
index 0000000..058ccc2
--- /dev/null
@@ -0,0 +1,374 @@
+import time
+import subprocess
+import os
+import re
+from datetime import datetime
+from datetime import timedelta
+import yaml
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+import ruamel.yaml
+from decorators_for_robot_functionalities import *
+from test_constants import *
+from users import *
+
+LOG_DIR = os.path.dirname(__file__)
+ex = BuiltIn().get_library_instance('execute_command')
+SSHLIB = ex.get_ssh_library_instance()
+STACK_INFOS = BuiltIn().get_library_instance('stack_infos')
+BuiltIn().import_library('pabot.PabotLib')
+PABOT = BuiltIn().get_library_instance('pabot.PabotLib')
+
+
+def keyword_runner(keywords, counter=0):
+    try:
+        BuiltIn().run_keyword(keywords[counter])
+    except Exception as err:
+        raise err
+    finally:
+        counter += 1
+        if len(keywords) > counter:
+            keyword_runner(keywords, counter)
+
+
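# A sketch of the intent (keyword names are hypothetical): every keyword in the
# list is attempted even if an earlier one fails, because the recursive call sits
# in the finally block, and the most recent failure still propagates afterwards.
#
#   keyword_runner(['Install Charts', 'Run Traffic Tests', 'Delete Charts'])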
+@robot_log
+def gather_logs(command, logfile_name, local_path):
+    remote_file_path = ROBOT_LOG_PATH + logfile_name
+    local_file_path = os.path.join(local_path, logfile_name)
+    ex.execute_unix_command_as_root("echo  -e '****** This is the output of: " +
+                                    command + " ****** \n' > " + remote_file_path)
+    ex.execute_unix_command_as_root(command + " >> " + remote_file_path)
+    ex.execute_unix_command_as_root("chmod 777 " + remote_file_path)
+    SSHLIB.get_file(remote_file_path, local_file_path)
+    ex.execute_unix_command_as_root("rm -f " + remote_file_path)
+
+
+@robot_log
+def gather_logs_from_remote(command, logfile_name, local_path, host, user={}):
+    if not user:
+        user = ex.get_default_user()
+    local_file_path = os.path.join(local_path, logfile_name)
+    remote_file_path = ROBOT_LOG_PATH + logfile_name
+    ex.execute_unix_command_on_remote_as_root("echo  -e '****** This is the output of: " +
+                                              command + " ****** \n' > " + remote_file_path, host, user)
+    ex.execute_unix_command_on_remote_as_root(command + " >> " + remote_file_path, host, user)
+    transfer_file_from_remote(remote_file_path, remote_file_path, local_file_path, host, user)
+    ex.execute_unix_command_on_remote_as_root("rm -f " + remote_file_path, host, user)
+
+
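# Illustrative usage, assuming a hypothetical command, log file names and host:
#
#   gather_logs("df -h", "disk_usage.log", LOG_DIR)
#   gather_logs_from_remote("df -h", "disk_usage_node2.log", LOG_DIR, "192.168.1.2")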
+@robot_log
+def transfer_file_from_remote(remote_file_path, temp_file_path, local_file_path, host, user):
+    """
+    This method is used to transfer a file to the localhost from a node other than the CRF_node_1.
+      :param remote_file_path: full file path on the remote node
+      :param temp_file_path: full file path on the CRF_node_1
+      :param local_file_path: full file path on the localhost
+      :param host: ip/hostname of the remote node
+      :param user: this user is used with the scp command
+    """
+    scp_command = "scp " + user['username'] + "@" + host + ":" + remote_file_path + " " + temp_file_path
+    SSHLIB.write(scp_command)
+    SSHLIB.read_until(host + "'s password:")
+    SSHLIB.write(user['password'])
+    SSHLIB.read_until(user['prompt'])
+    SSHLIB.get_file(temp_file_path, local_file_path)
+    ex.execute_unix_command_as_root("rm -f " + temp_file_path)
+
+
+def wait_for_healthy_kube_controller_manager():
+    wait_until = datetime.now() + timedelta(seconds=180)
+    command = "kubectl get componentstatus | grep controller-manager | grep Healthy | wc -l"
+    result = ex.execute_unix_command_as_root(command)
+    while (int(result) < 1) and (datetime.now() < wait_until):
+        logger.info("datetime.now:" + str(datetime.now()))
+        logger.info("wait_until:" + str(wait_until))
+        logger.info("Controller-manager is not healthy yet, waiting...")
+        time.sleep(10)
+        result = ex.execute_unix_command_as_root(command)
+    if int(result) < 1:
+        raise Exception("Controller-manager is not healthy!")
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("modify_static_pod_config")
+def modify_static_pod_config(operation,
+                             manifest_file,
+                             flags):
+    """
+    This method inserts/removes the given flag list into the manifest file of a static pod.
+
+    :param manifest_file: manifest file name with extension present in /etc/kubernetes/manifests folder
+    :param flags: flags which will be given to the executed command in the container
+    :param operation: add or remove
+
+    """
+    crf_nodes = STACK_INFOS.get_crf_nodes()
+    if not crf_nodes:
+        logger.info("Nodes dictionary is empty, nothing to check.")
+        return
+    logger.info("adding flag to pod file")
+    for key in crf_nodes:
+        ex.execute_unix_command_on_remote_as_root("mv /etc/kubernetes/manifests/" +
+                                                  manifest_file + " /tmp/" + manifest_file, crf_nodes[key])
+        yaml_content = ruamel.yaml.round_trip_load(ex.execute_unix_command_on_remote_as_root("cat /tmp/" +
+                                                                                             manifest_file,
+                                                                                             crf_nodes[key]),
+                                                   preserve_quotes=True)
+        for actual_flag in flags:
+            operation(yaml_content, actual_flag)
+
+        yaml_content = ruamel.yaml.round_trip_dump(yaml_content, default_flow_style=False)
+    kube_controller_manager['obj_count'] = str(len(crf_nodes))
+    check_kubernetes_object(kube_controller_manager, test_kubernetes_object_not_available, timeout=300)
+
+    for key in crf_nodes:
+        ex.execute_unix_command_on_remote_as_root("echo \"" + yaml_content + "\" > /etc/kubernetes/manifests/" +
+                                                  manifest_file, crf_nodes[key])
+        ex.execute_unix_command_on_remote_as_root("rm -f /tmp/" + manifest_file, crf_nodes[key])
+    check_kubernetes_object(kube_controller_manager, test_kubernetes_object_available,
+                            additional_filter="Running", timeout=300)
+    wait_for_healthy_kube_controller_manager()
+
+
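# Illustrative usage (manifest name and flag are hypothetical); the two operation
# helpers defined just below are passed in as the first argument:
#
#   modify_static_pod_config(add_flag_to_command, "kube-controller-manager.yml", ["--v=4"])
#   modify_static_pod_config(remove_flag_from_command, "kube-controller-manager.yml", ["--v=4"])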
+@robot_log
+def add_flag_to_command(yaml_content, flag):
+    yaml_content["spec"]["containers"][0]["command"].append(flag)
+
+
+@robot_log
+def remove_flag_from_command(yaml_content, flag):
+    yaml_content["spec"]["containers"][0]["command"].remove(flag)
+
+
+@robot_log
+def helm_install(chart_name, release_name, values="registry_url={reg_url}".format(reg_url=reg)):
+    command = "helm install " + chart_name + " --name " + release_name
+    if values:
+        command += " --set " + values
+    ex.execute_unix_command(command, fail_on_non_zero_rc=False)
+    if helm_list(release_name) == '1':
+        logger.info(chart_name + " chart is successfully installed")
+    else:
+        raise Exception(chart_name + " chart install has failed.")
+
+
+@robot_log
+def helm_delete(release_name):
+    ex.execute_unix_command("helm delete " + release_name + " --purge ", delay="30s", fail_on_non_zero_rc=False)
+    if helm_list(release_name) == '0':
+        logger.info(release_name + " chart is successfully deleted")
+    else:
+        raise Exception(release_name + " chart delete has failed.")
+
+
+@robot_log
+def helm_list(release_name, add_check_arg=''):
+    grep_arg = 'grep -w {}'.format(release_name)
+    if add_check_arg != '':
+        grep_arg += '| grep -w {}'.format(add_check_arg)
+    command = "helm list --all | {} | wc -l".format(grep_arg)
+    stdout, _ = ex.execute_unix_command(command, fail_on_non_zero_rc=False)
+    return stdout.strip()
+
+
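# Illustrative helm round trip (chart and release names are hypothetical; the
# values argument defaults to 'registry_url=<reg>'):
#
#   helm_install("default/busybox3", "busybox3-test")
#   assert helm_list("busybox3-test", "DEPLOYED") == '1'
#   helm_delete("busybox3-test")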
+@robot_log
+def check_kubernetes_object(kube_object, tester_function, additional_filter=".*", timeout=0, delay=0):
+    """
+    This method executes a kubectl get command with the given args, filters the output and checks the result with
+      the given tester_function.
+      :param kube_object: a dictionary, it represents a kubernetes object;
+                          obj_type, obj_name, namespace, obj_count keys are required.
+      :param tester_function: this function checks the result and waits for the expected result
+                              - kubernetes object exists or not - to happen in a given time
+      :param additional_filter: use this regexp to filter the results further
+      :param timeout: wait <timeout> seconds for the result
+      :param delay: wait <delay> seconds before the tester command
+    """
+    command = "kubectl get {object} -n {ns_arg} 2>/dev/null | grep -w {name} | grep -E '{grep_arg}' | wc -l"
+    command = command.format(object=kube_object['obj_type'], name=kube_object['obj_name'],
+                             ns_arg=kube_object['namespace'], grep_arg=additional_filter)
+    tester_function(kube_object, timeout, command, delay)
+
+
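# Illustrative usage, mirroring the call in modify_static_pod_config above: wait up
# to 300s until the expected number of Running kube-controller-manager pods appear.
#
#   kube_cm = {'obj_type': 'pod', 'obj_name': 'kube-controller-manager',
#              'namespace': 'kube-system', 'obj_count': '3'}
#   check_kubernetes_object(kube_cm, test_kubernetes_object_available,
#                           additional_filter="Running", timeout=300)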
+@robot_log
+def is_result_expected_within_given_time(command, expected_result, timeout, delay=0):
+    time.sleep(delay)
+    result = ex.execute_unix_command(command)
+    if result == expected_result:
+        return True
+    wait_until = datetime.now() + timedelta(seconds=timeout)
+    while result != expected_result and (datetime.now() < wait_until):
+        logger.info("datetime.now:" + str(datetime.now()))
+        logger.info("wait_until:" + str(wait_until))
+        logger.info("expected result: " + expected_result)
+        logger.info("result: " + result)
+        time.sleep(1)
+        result = ex.execute_unix_command(command)
+        if result == expected_result:
+            return True
+    return False
+
+
+def test_kubernetes_object_quality(kube_object, expected_result, filter=".*", timeout=30, delay=0):
+    tester_command = "kubectl get " + kube_object['obj_type'] + " --all-namespaces | grep -w " + \
+                     kube_object['obj_name'] + " | grep -E '" + filter + "' | wc -l"
+    res = is_result_expected_within_given_time(tester_command, expected_result, timeout, delay)
+    if not res:
+        log_command = "kubectl get " + kube_object['obj_type'] + " --all-namespaces | grep -w " + \
+                      kube_object['obj_name']
+        res = ex.execute_unix_command(log_command)
+        ex.execute_unix_command("kubectl describe " + kube_object['obj_type'] + " " + kube_object['obj_name'] + " -n " +
+                                kube_object['namespace'])
+        raise Exception("Not " + kube_object['obj_count'] + " " + kube_object['obj_type'] + " " +
+                        kube_object['obj_name'] + " is in expected (" + filter + ") state:" + res)
+    logger.console(kube_object['obj_count'] + " " + kube_object['obj_type'] + " " + kube_object['obj_name'] +
+                   " is in expected (" + filter + ") state.")
+
+
+def test_kubernetes_object_available(kube_object, timeout, tester_command, delay=0):
+    res = is_result_expected_within_given_time(tester_command, kube_object['obj_count'], timeout=timeout, delay=delay)
+    if not res:
+        describe_command = "kubectl describe " + kube_object['obj_type'] + " -n " + \
+                           kube_object['namespace'] + " " + kube_object['obj_name']
+        ex.execute_unix_command(describe_command, fail_on_non_zero_rc=False)
+        raise Exception("Not " + kube_object['obj_count'] + " " + kube_object['obj_type'] + " " +
+                        kube_object['obj_name'] + " is running!")
+    logger.console(kube_object['obj_count'] + " " + kube_object['obj_type'] + " " + kube_object['obj_name'] +
+                   " is running, as expected!")
+
+
+def test_kubernetes_object_not_available(kube_object, timeout, tester_command, delay=0):
+    res = is_result_expected_within_given_time(tester_command, expected_result="0", timeout=timeout, delay=delay)
+    if not res:
+        describe_command = "kubectl describe " + kube_object['obj_type'] + " -n " + \
+                           kube_object['namespace'] + " " + kube_object['obj_name']
+        ex.execute_unix_command(describe_command, fail_on_non_zero_rc=False)
+        raise Exception("At least 1 " + kube_object['obj_type'] + " " + kube_object['obj_name'] + " still exists!")
+    logger.console(kube_object['obj_type'] + " " + kube_object['obj_name'] + " does not exist, as expected!")
+
+
+def is_node_under_pressure(nodeslog):
+    return "pressure" in nodeslog
+
+
+def wait_if_pressure(timeout=pressure_default_timeout):
+    wait_until = datetime.now() + timedelta(seconds=timeout)
+    command = "kubectl get nodes -o json | jq '.items[] | \"\(.metadata.name) \(.spec.taints)\"'"
+    nodeslog = ex.execute_unix_command_as_root(command)
+    while (is_node_under_pressure(nodeslog)) and (datetime.now() < wait_until):
+        logger.info("datetime.now:" + str(datetime.now()))
+        logger.info("wait_until:" + str(wait_until))
+        logger.info("Node under pressure found: " + nodeslog)
+        time.sleep(10)
+        nodeslog = ex.execute_unix_command_as_root(command)
+    if is_node_under_pressure(nodeslog):
+        raise Exception("Node pressure not resolved in time.")
+    else:
+        logger.info(nodeslog)
+
+
+@robot_log
+def check_url_running(filename, url):
+    command = "curl -s {url} > /dev/null ; echo -n $?"
+    result = ex.execute_unix_command_as_root(command.format(url=url))
+    if result == "0":
+        logger.console("{url} is running!".format(url=url))
+    else:
+        gather_logs("curl -s {url}".format(url=url), filename, LOG_DIR)
+        raise Exception("{url} is not running !".format(url=url))
+
+
+@robot_log
+def subprocess_cmd(command):
+    return subprocess.check_output(command, shell=True).strip()
+
+
+@robot_log
+def put_file(local_script_path, remote_script_path, permissions="777", user=root['username'], group=root['username']):
+    ex.get_ssh_library_instance().put_file(local_script_path, remote_script_path, permissions)
+    head, tail = os.path.split(remote_script_path)
+    command = 'ls -l ' + head + ' | grep ' + tail + ' | wc -l'
+    res = is_result_expected_within_given_time(command, expected_result="1", timeout=5)
+    if not res:
+        raise Exception("File not found at " + remote_script_path + "!")
+    ex.execute_unix_command_as_root('chgrp ' + group + ' ' + remote_script_path)
+    ex.execute_unix_command_as_root('chown ' + user + ' ' + remote_script_path)
+
+
+@robot_log
+def get_helm_chart_content(chart_name):
+    ex.execute_unix_command("helm fetch " + chart_name + " --untar --untardir /tmp")
+    return ex.execute_unix_command("ls /tmp/" + chart_name.split('/')[1] +
+                                   "/templates | awk -F . '{print $1}'").split('\r\n')
+
+
+@robot_log
+def get_cpupools():
+    node_map = {}
+    node_list = ex.execute_unix_command("kubectl get nodes -L=nodename | awk '{print $6}'| tail -n +2")
+    cmap_str = ex.execute_unix_command("kubectl get configmap -n kube-system {cm} -o yaml"
+                                       .format(cm=cpu_pooling_cm_name))
+    for nodename in node_list.splitlines():  # pylint: disable=too-many-nested-blocks
+        yamldict = yaml.load(cmap_str)
+        for key in yamldict['data']:
+            if nodename in yamldict['data'][key]:
+                worker_yaml = yaml.load(yamldict['data'][key])
+                pool_dict = {}
+                if worker_yaml['pools']:
+                    for pool in worker_yaml['pools']:
+                        pool_str = worker_yaml['pools'][pool]['cpus']
+                        pool_list = []
+                        for sub_list in pool_str.split(','):
+                            pool_list = pool_list + ([int(sub_list)] if '-' not in sub_list else
+                                                     range(int(sub_list.split('-')[0]),
+                                                           int(sub_list.split('-')[1]) + 1))
+                        pool_dict[pool] = pool_list
+                node_map[nodename] = pool_dict
+    return node_map
+
+
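# The cpus strings in the configmap mix single ids and ranges; a sketch of the
# expansion above with a hypothetical value (Python 2, where range() is a list):
#
#   pool_str = "0,4-7"
#   expanded = []
#   for sub in pool_str.split(','):
#       expanded += [int(sub)] if '-' not in sub else \
#           range(int(sub.split('-')[0]), int(sub.split('-')[1]) + 1)
#   # expanded == [0, 4, 5, 6, 7]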
+@robot_log
+def get_cpu_allowed_list_from_pod(pod_name):
+    bash_command = "cat /proc/1/status | grep Cpus_allowed_list"
+    result = ex.execute_unix_command("kubectl exec `kubectl get pod | grep {0} | "
+                                     "awk '{{print $1}}'` -- {1}".format(pod_name, bash_command))
+    pool_list = []
+    for cpu in result.split(':')[1].split(','):
+        pool_list = pool_list + ([int(cpu)] if '-' not in cpu else range(int(cpu.split('-')[0]),
+                                                                         int(cpu.split('-')[1]) + 1))
+    return pool_list
+
+
+@robot_log
+def allowed_cpus_is_in_cpu_pool(allowed_cpus, cpu_pool):
+    for allowed in allowed_cpus:
+        if allowed not in cpu_pool:
+            return False
+    return True
+
+
+def decide_nodename():
+    nodename = 'caas_worker1'
+    command = "kubectl get node -L=nodename | awk {{'print $6'}} | tail -n +2"
+    node_names = ex.execute_unix_command(command)
+    if nodename not in node_names:
+        return node_names.splitlines()[0]
+    return nodename
+
+
+@robot_log
+def determine_accurate_running_time_of_obj(object_type, object_name):
+    hours = mins = secs = 0
+    cmd = "kubectl get {obj_type} --all-namespaces --no-headers=true | grep {obj_name} | awk '{{print $NF}}'" \
+        .format(obj_type=object_type, obj_name=object_name)
+    resp = ex.execute_unix_command(cmd)
+    pod_time = re.findall(r'\d{0,2}h|\d{0,3}m|\d{1,3}s', resp)
+    for t in pod_time:
+        if t[-1] == 'h':
+            hours = int(t[:-1])
+        elif t[-1] == 'm':
+            mins = int(t[:-1])
+        elif t[-1] == 's':
+            secs = int(t[:-1])
+
+    return datetime.now() - timedelta(hours=hours, minutes=mins, seconds=secs)
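determine_accurate_running_time_of_obj converts the kubectl AGE column back into an
approximate start timestamp. A minimal sketch of the parsing step, assuming a
hypothetical AGE value of "5h23m":

    import re
    from datetime import datetime, timedelta

    resp = "5h23m"
    parts = re.findall(r'\d{0,2}h|\d{0,3}m|\d{1,3}s', resp)   # ['5h', '23m']
    hours = mins = secs = 0
    for t in parts:
        if t.endswith('h'):
            hours = int(t[:-1])
        elif t.endswith('m'):
            mins = int(t[:-1])
        elif t.endswith('s'):
            secs = int(t[:-1])
    start = datetime.now() - timedelta(hours=hours, minutes=mins, seconds=secs)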
diff --git a/libraries/common/decorators_for_robot_functionalities.py b/libraries/common/decorators_for_robot_functionalities.py
new file mode 100644 (file)
index 0000000..e5e16e7
--- /dev/null
@@ -0,0 +1,76 @@
+import inspect
+from robot.conf import RobotSettings
+from robot.api import logger
+from robot.variables import VariableScopes
+from robot.running.timeouts import KeywordTimeout
+from robot.libraries.BuiltIn import BuiltIn
+
+
+BuiltIn().import_library('pabot.PabotLib')
+PABOT = BuiltIn().get_library_instance('pabot.PabotLib')
+
+
+# if both the timeout and the log decorator are used on one function, the timeout decorator should be applied first
+
+
+def robot_timeout(timeoutinseconds):
+    def timeout_decorator(func):
+        def wrapper(*args, **kwargs):
+            timeout_msg = func.__name__ + " timed out !!"
+            timeout = KeywordTimeout(timeoutinseconds, timeout_msg, VariableScopes(RobotSettings()))
+            timeout.start()
+            return timeout.run(func, args, kwargs)
+
+        return wrapper
+
+    return timeout_decorator
+
+
+def robot_log(func):
+    def wrapper(*args, **kwargs):
+        spec = inspect.getargspec(func)
+        for key in kwargs.keys():
+            if key not in spec.args:
+                # if the function is called from robot and one of its unnamed string
+                # parameters has '=' in it, move this parameter from kwargs to args
+                arg_list = list(args)
+                b = '{0}={1}'.format(key, kwargs[key])
+                arg_list.append(b)
+                args = tuple(arg_list)
+                kwargs.pop(key)
+
+        argnames = func.func_code.co_varnames[:func.func_code.co_argcount]
+        parameters = ": "
+        for entry in zip(argnames, args) + kwargs.items():
+            if 'self' not in entry:
+                parameters += ('%s=%r, ' % entry)
+        fname = func.func_name
+        logger.info("<span class='label pass'><span style='font-size: 1.25em'>ENTER: " + fname +
+                    "</span></span>" + parameters, html=True)
+        result = func(*args, **kwargs)
+        logger.info("<span class='label warn'><span style='font-size: 1.25em'>EXIT: " + fname +
+                    "</span></span>", html=True)
+        return result
+
+    return wrapper
+
+
+def pabot_lock(lock_name):
+    """Holds a Pabot lock for the duration of the decorated function's execution.
+    pabot_lock should be placed after robot_log if both decorators are used on the same function"""
+
+    def pabot_lock_decorator(func):
+        def wrapper(*args, **kwargs):
+            PABOT.acquire_lock(lock_name)
+            logger.info(lock_name + " lock acquired on " + func.__name__)
+            result = None
+            try:
+                result = func(*args, **kwargs)
+            finally:
+                PABOT.release_lock(lock_name)
+                logger.info(lock_name + " lock released from " + func.__name__)
+            return result
+
+        return wrapper
+
+    return pabot_lock_decorator
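Putting the three decorators together, honouring the ordering notes above (timeout
outermost, pabot_lock below robot_log); the keyword body and the reuse of the
health_check_1 lock name are a sketch, not part of this change:

    @robot_timeout("120 seconds")
    @robot_log
    @pabot_lock("health_check_1")
    def restart_component():
        pass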
diff --git a/libraries/common/execute_command.py b/libraries/common/execute_command.py
new file mode 100644 (file)
index 0000000..8df0240
--- /dev/null
@@ -0,0 +1,255 @@
+import re
+import time
+from datetime import datetime
+from datetime import timedelta
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+from robot.libraries.String import String
+from decorators_for_robot_functionalities import *
+from users import *
+from test_constants import *
+
+
+@robot_log
+def check_if_login_was_successful(login_output):
+    login_errors = ['authentication failure', 'name or service not known', 'permission denied']
+    for login_error in login_errors:
+        if re.search(login_error, login_output, re.IGNORECASE):
+            return False
+    return True
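# The check is a case-insensitive substring match against known failure patterns; a sketch:
#
#   check_if_login_was_successful("Permission denied, please try again.")   # -> False
#   check_if_login_was_successful("Last login: Tue Oct 15 14:21:57 2019")   # -> True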
+
+
+class execute_command:  # pylint: disable=old-style-class
+    def __init__(self):
+        self._builtin = BuiltIn()
+        self._string = String()
+        self._builtin.import_library('SSHLibrary')
+        self._sshlibrary = self._builtin.get_library_instance('SSHLibrary')
+        self._default_user = {}
+        self._prompt = ':prompt:'
+        self._local_infra_int_ip_ipv4 = ''
+        self._key_exists = False
+
+    def get_ssh_library_instance(self):
+        return self._sshlibrary
+
+    def get_default_user(self):
+        return self._default_user
+
+    @robot_log
+    def open_connection_and_log_in(self, host, user, private_key=None, timeout="90s"):
+        self._sshlibrary.open_connection(host=host, timeout=timeout)
+        login_output = ''
+        wait_until = datetime.now() + timedelta(seconds=60)
+        while datetime.now() < wait_until:
+            time.sleep(1)
+            try:
+                if private_key is None:
+                    login_output = self._sshlibrary.login(user['username'], user['password'])
+                else:
+                    login_output = self._sshlibrary.login_with_public_key(user['username'],
+                                                                          private_key, user['password'])
+            except Exception:
+                logger.warn("Login was unsuccessful, trying again.")
+                continue
+            if check_if_login_was_successful(login_output):
+                self._configure_prompt()
+                logger.info("Login was successful.")
+                break
+        return login_output
+
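#     Illustrative call (host and credentials are hypothetical; the user dict needs
#     at least 'username' and 'password' keys):
#
#       ex.open_connection_and_log_in("10.0.0.1", {'username': 'cloudadmin',
#                                                  'password': 'secret'})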
+    @robot_log
+    def set_basic_connection(self, user, private_key=None):
+        self._default_user = user
+        stack_infos = self._builtin.get_library_instance('stack_infos')
+        self.open_connection_and_log_in(stack_infos.get_floating_ip(), user, private_key)
+        self._local_infra_int_ip_ipv4 = self.get_interface_ipv4_address(stack_infos.get_crf_nodes())
+        self._key_exists = self.check_id_rsa_exists()
+        self.stop_auditd_service()
+
+    @robot_log
+    def _configure_prompt(self):
+        self._sshlibrary.write('export PS1=' + self._prompt)
+        self._sshlibrary.read_until_regexp('(?m)^' + self._prompt + '.*')
+
+    @robot_log
+    def su_as(self, user):
+        def check_if_su_was_successful(login_output):
+            if 'incorrect password' in login_output or 'Authentication failure' in login_output:
+                return False
+            return True
+
+        self._sshlibrary.write('su ' + user['username'])
+        self._sshlibrary.read_until('Password:')
+        self._sshlibrary.write(user['password'])
+        output = self._sshlibrary.read_until(self._prompt)
+        if not check_if_su_was_successful(output):
+            raise Exception(output)
+
+    @robot_log
+    def sudo_as_root(self):
+        self._sshlibrary.write('sudo -s')
+        self._sshlibrary.read_until(self._prompt)
+
+    @robot_log
+    def exit_from_user(self):
+        self._sshlibrary.write('exit')
+        self._sshlibrary.read_until(self._prompt)
+
+    @robot_log
+    def execute_unix_command(self,
+                             command,
+                             fail_on_non_zero_rc=True,
+                             delay="90s",
+                             skip_prompt_in_command_output=False,
+                             user={}):
+        """
+        This method executes a linux command via the SSHlibrary connection.
+        The user account which issues the command, is the same as which the connection has opened for (by default)
+        The command can be also executed by switching (su) to another user (e.g. parameter usage: user = "root")
+
+        :param command:
+        :param fail_on_non_zero_rc: the command will fail if return code is nonzero
+        :param delay:
+        :param skip_prompt_in_command_output:
+        :param user: switch to user, by default the command is executed with the current user
+        for which the ssh connection was opened
+
+        :return: stdout: command output is returned
+        """
+        user_changed = False
+        self._sshlibrary.set_client_configuration(timeout=delay)
+        if user == root:
+            self.sudo_as_root()
+            user_changed = True
+        elif bool(user) and user != self._default_user:
+            self.su_as(user)
+            user_changed = True
+
+        self._sshlibrary.write(command)
+        try:
+            if skip_prompt_in_command_output:
+                stdout = self._sshlibrary.read_until_regexp("(^|\n| )" + self._prompt + "$")
+            else:
+                stdout = self._sshlibrary.read_until(self._prompt)
+        except Exception as err:
+            stdout = unicode(err)
+            ctrl_c = self._builtin.evaluate('chr(int(3))')
+            self._sshlibrary.write_bare(ctrl_c)
+            self._sshlibrary.read_until(self._prompt)
+        stdout = re.sub(self._prompt + '$', '', stdout).strip()
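+        # The SSHLibrary write/read API does not expose the return code, so fetch it with a follow-up echo.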
+        self._sshlibrary.write('echo error code: $?')
+        error_code = self._sshlibrary.read_until(self._prompt)
+        logger.trace("Error code variable value (befor processing)=" + error_code)
+        error_code = self._string.get_lines_matching_regexp(error_code,
+                                                            pattern='error code: \\d+').split(':')[1].strip()
+        logger.trace("Error code variable value (after processing)=" + error_code)
+        self._sshlibrary.set_client_configuration(timeout="60s")
+        if user_changed:
+            self.exit_from_user()
+        fail_on_non_zero_rc = self._builtin.convert_to_boolean(fail_on_non_zero_rc)
+        if fail_on_non_zero_rc:
+            if error_code != '0':
+                raise Exception('command: ' + command + '\nreturn code: ' + error_code + '\noutput: ' + stdout)
+            return stdout
+        else:
+            return [stdout, error_code]
+
+    @robot_log
+    def execute_unix_command_as_root(self,
+                                     command,
+                                     fail_on_non_zero_rc=True,
+                                     delay="90s",
+                                     skip_prompt_in_command_output=False):
+        return self.execute_unix_command(command, fail_on_non_zero_rc, delay, skip_prompt_in_command_output, root)
+
+    @robot_log
+    def ssh_to_another_node(self, host, user):
+        self._sshlibrary.write('ssh ' + user['username'] + '@' + host + ' -o "StrictHostKeyChecking no"')
+        if not self._key_exists:
+            logger.info("Login with password")
+            self._sshlibrary.read_until("'s password:")
+            self._sshlibrary.write(user['password'])
+        ssh_regexp = re.compile(r"\[{0}@.*$|authentication failure|name or service not known|permission denied"
+                                .format(user["username"]), re.IGNORECASE)
+        stdout = self._sshlibrary.read_until_regexp(ssh_regexp)
+        if not check_if_login_was_successful(stdout):
+            raise Exception("Login to another node FAILED")
+        self._configure_prompt()
+
+    @robot_log
+    def execute_unix_command_on_remote_as_root(self,
+                                               command,
+                                               host,
+                                               user={},
+                                               fail_on_non_zero_rc=True,
+                                               delay="90s",
+                                               skip_prompt_in_command_output=False):
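+        # Hop to the target node over SSH only when the command is not meant for the node this connection is on.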
+        if self._local_infra_int_ip_ipv4 != host:
+            if not user:
+                user = self._default_user
+            self.ssh_to_another_node(host, user)
+            output = self.execute_unix_command_as_root(command, fail_on_non_zero_rc, delay,
+                                                       skip_prompt_in_command_output)
+            self._sshlibrary.write('exit')
+            self._sshlibrary.read_until(self._prompt)
+        else:
+            output = self.execute_unix_command_as_root(command, fail_on_non_zero_rc, delay,
+                                                       skip_prompt_in_command_output)
+        return output
+
+    @robot_log
+    def execute_unix_command_on_remote_as_user(self,
+                                               command,
+                                               host,
+                                               user={},
+                                               fail_on_non_zero_rc=True,
+                                               delay="90s",
+                                               skip_prompt_in_command_output=False):
+        if not user:
+            user = self._default_user
+        if self._local_infra_int_ip_ipv4 != host:
+            self.ssh_to_another_node(host, user)
+            output = self.execute_unix_command(command, fail_on_non_zero_rc, delay, skip_prompt_in_command_output,
+                                               user=user)
+            self._sshlibrary.write('exit')
+            self._sshlibrary.read_until(self._prompt)
+        else:
+            output = self.execute_unix_command(command, fail_on_non_zero_rc, delay, skip_prompt_in_command_output,
+                                               user=user)
+        return output
+
+    @robot_log
+    def get_interface_ipv4_address(self, nodes):
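+        # Return the candidate node IP which is configured on the host this connection is open to.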
+        for key in nodes:
+            if self.execute_unix_command("ip a | grep " + nodes[key] + " | wc -l") == "1":
+                return nodes[key]
+        return None
+
+    @robot_log
+    def get_interface_ipv6_address(self, interface):
+        return self.execute_unix_command_as_root('ip addr list ' + interface +
+                                                 ' | grep --color=no -o -P "(?<=inet6 ).*(?=/.*)"')
+
+    @robot_log
+    def check_id_rsa_exists(self):
+        _, err_code = self.execute_unix_command("ls /home/{0}/.ssh/id_rsa".format(self._default_user["username"]),
+                                                fail_on_non_zero_rc=False)
+        return err_code == '0'
+
+    @robot_log
+    def stop_auditd_service(self):
+        stack_infos = self._builtin.get_library_instance('stack_infos')
+        if stack_infos.get_virtual_env():
+            all_nodes = stack_infos.get_all_nodes()
+            if not all_nodes:
+                logger.info("Nodes dictionary is empty, nothing to check.")
+                return
+            for node in all_nodes.itervalues():
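+                # auditd ships with RefuseManualStop=yes, so the unit file is patched before stopping the service.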
+                command = "sed -i \"s#RefuseManualStop=yes#RefuseManualStop=no#g\" " \
+                          "/usr/lib/systemd/system/auditd.service"
+                self.execute_unix_command_on_remote_as_root(command, node)
+                command = "systemctl daemon-reload"
+                self.execute_unix_command_on_remote_as_root(command, node)
+                command = "systemctl stop auditd.service"
+                self.execute_unix_command_on_remote_as_root(command, node)
diff --git a/libraries/common/stack_infos.py b/libraries/common/stack_infos.py
new file mode 100644 (file)
index 0000000..4ddf52f
--- /dev/null
@@ -0,0 +1,77 @@
+import sys
+import os
+import json
+import paramiko
+from robot.libraries.BuiltIn import BuiltIn
+from users import *
+sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
+
+
+class stack_infos:  # pylint: disable=old-style-class
+
+    INVENTORY_PATH = "/opt/cmframework/scripts/inventory.sh"
+
+    def __init__(self):
+        self._floating_ip = BuiltIn().get_variable_value("${floating_ip}")
+
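+        # The Ansible inventory fetched from the controller serves as the single source of node and network info.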
+        client = paramiko.SSHClient()
+        try:
+            client.load_system_host_keys()
+            client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
+            client.connect(self._floating_ip,
+                           username=cloudadmin['username'],
+                           password=cloudadmin['password'])
+            _, stdout, _ = client.exec_command(self.INVENTORY_PATH)
+            self._inventory = json.loads(stdout.read())
+        finally:
+            client.close()
+
+        self._crf_nodes = self.get_node_ip_based_on_caas_profile("caas_master")
+        if not self._crf_nodes:
+            raise Exception("crf_nodes dictionary is empty!")
+        self._storage_nodes = self.get_node_ip_based_on_caas_profile("caas_storage")
+        self._worker_nodes = self.get_node_ip_based_on_caas_profile("caas_worker")
+
+    def get_floating_ip(self):
+        return self._floating_ip
+
+    def get_crf_nodes(self):
+        return self._crf_nodes
+
+    def get_storage_nodes(self):
+        return self._storage_nodes
+
+    def get_worker_nodes(self):
+        return self._worker_nodes
+
+    def get_all_nodes(self):
+        all_nodes = self._crf_nodes.copy()
+        all_nodes.update(self._storage_nodes)
+        all_nodes.update(self._worker_nodes)
+        return all_nodes
+
+    def get_inventory(self):
+        return self._inventory
+
+    def get_node_ip_based_on_caas_profile(self, caas_profile):  # pylint: disable=invalid-name
+        node_ip = {}
+        if caas_profile in self._inventory:
+            for node in self._inventory[caas_profile]:
+                node_ip[node] = self._inventory["_meta"]["hostvars"][node]["networking"]["infra_internal"]["ip"]
+        return node_ip
+
+    def get_infra_int_if(self):
+        interface = self._inventory["_meta"]["hostvars"]["controller-1"]["networking"]["infra_internal"]["interface"]
+        return interface
+
+    def get_infra_ext_if(self):
+        iface = self._inventory["_meta"]["hostvars"]["controller-1"]["networking"]["infra_external"]["interface"]
+        return iface
+
+    def get_infra_storage_if(self):
+        iface = self._inventory["_meta"]["hostvars"]["controller-1"]["networking"]["infra_storage_cluster"]["interface"]
+        return iface
+
+    def get_virtual_env(self):
+        virtual_env = self._inventory["all"]["vars"]["virtual_env"]
+        return virtual_env
diff --git a/libraries/common/test_constants.py b/libraries/common/test_constants.py
new file mode 100644 (file)
index 0000000..b805fc0
--- /dev/null
@@ -0,0 +1,271 @@
+import os
+
+prompt = ':prompt:'
+
+int_if_name_in_openstack = 'infra-int'
+
+reg = os.getenv('REG')
+reg_port = os.getenv('REG_PORT')
+reg_path = os.getenv('REG_PATH')
+test_image = "kubernetespause"
+
+source_folder = os.getenv('SOURCE_FOLDER')
+vnf_id = os.getenv('STACK_ID')
+cbam_py = "{}/scripts/cbam.py".format(source_folder)
+
+registry_cert = '/etc/docker-registry/registry?.pem'
+registry_key = '/etc/docker-registry/registry?-key.pem'
+registry_cacert = '/etc/docker-registry/ca.pem'
+
+ROBOT_LOG_PATH = "/tmp/"
+
+registry = {'url': reg, 'port': reg_port}
+
+int_sshd_config_name = "sshd_config_int"
+ext_sshd_config_name = "sshd_config_ext"
+sshd_port = "22"
+
+dns_masq_port = "53"
+kube_dns_port = "10053"
+min_dns_replica = 1
+max_dns_replica = 3
+test_address1 = 'google.com'
+test_address2 = 'tiller.kube-system.svc.rec.io'
+
+crf_node_openstack_file_types = ["user_config.yaml"]
+
+pressure_default_timeout = 600
+
+# TC014 constant
+INFLUXDB_URL = "http://influxdb.kube-system.svc.nokia.net:8086"
+GRAFANA_URL = "http://monitoring-grafana.kube-system.svc.nokia.net:8080"
+
+# TC016 constant
+docker_size_quota = '2G'
+
+# TC Fluentd
+ELASTICSEARCH_URL = "http://elasticsearch-logging.kube-system.svc.nokia.net:9200"
+USER_CONFIG_PATH = "/opt/nokia/userconfig/user_config.yaml"
+ES_IDX_PREFIX = "caas"
+
+test_chart = dict(name="busybox3", release_name="custom-oper-test", chart_version="3.3.3",
+                  repo="default/",
+                  kube_objects=[dict(obj_type="pod", obj_name="busybox3", obj_count="1",
+                                     namespace="kube-system")])
+
+su_test_chart = dict(name="su-test", release_name="su-test", chart_version="1.1.1",
+                     su_version="1.1.1", repo="default/",
+                     kube_objects=[dict(obj_type="pod", obj_name="su-test", obj_count="10",
+                                        namespace="kube-system")])
+
+su_test_chart1 = dict(name="su-test", release_name="su-test", chart_version="1.1.2",
+                      su_version="1.1.1", repo="default/",
+                      kube_objects=[dict(obj_type="pod", obj_name="su-test", obj_count="10",
+                                         namespace="kube-system")])
+
+su_test_chart_f = dict(name="su-test_f", release_name="su-test", chart_version="1.1.4",
+                       su_version="1.1.1", repo="default/",
+                       kube_objects=[dict(obj_type="pod", obj_name="su-test_f", obj_count="10",
+                                          namespace="kube-system")])
+
+pv_test_pod = dict(obj_type="pod", obj_name="pv-test-deployment", obj_count="2", namespace="default")
+pv_test_pvc = dict(obj_type="pvc", obj_name="pvc", obj_count="1", namespace="default")
+kube_controller_manager = dict(obj_type="pod", obj_name="kube-controller-manager", obj_count="3", namespace="kube-system")
+influxdb_service = dict(obj_type="service", obj_name="influxdb", obj_count="1", namespace="kube-system")
+influxdb_deployment = dict(obj_type="deployment", obj_name="influxdb", obj_count="1", namespace="kube-system")
+grafana_service = dict(obj_type="service", obj_name="monitoring-grafana", obj_count="1", namespace="kube-system")
+grafana_deployment = dict(obj_type="deployment", obj_name="monitoring-grafana", obj_count="1", namespace="kube-system")
+danmnet_pods1 = dict(obj_type="pod", obj_name="danmnet-pods1", obj_count="4", namespace="default",    ip_list=[])
+danmnet_pods2 = dict(obj_type="pod", obj_name="danmnet-pods2", obj_count="3", namespace="default",    ip_list=[])
+danmnet_pods3 = dict(obj_type="pod", obj_name="danmnet-pods3", obj_count="4", namespace="default",    ip_list=[])
+danmnet_pods4 = dict(obj_type="pod", obj_name="danmnet-pods4", obj_count="5", namespace="kube-system",ip_list=[])
+danmnet_pods5 = dict(obj_type="pod", obj_name="danmnet-pods5", obj_count="1", namespace="kube-system",ip_list=[])
+danmnet_pods6 = dict(obj_type="pod", obj_name="danmnet-pods6", obj_count="6", namespace="default",    ip_list=[])
+danmnet_pods7 = dict(obj_type="pod", obj_name="danmnet-pods7", obj_count="5", namespace="default",    ip_list=[])
+danmnet_pods8 = dict(obj_type="pod", obj_name="danmnet-pods8", obj_count="1", namespace="default",    ip_list=[])
+danmnet_pods9 = dict(obj_type="pod", obj_name="danmnet-pods9", obj_count="1", namespace="kube-system",ip_list=[])
+danmnet_pods10 = dict(obj_type="pod", obj_name="danmnet-pods10", obj_count="1", namespace="default",  ip_list=[])
+danmnet_pods11 = dict(obj_type="pod", obj_name="danmnet-pods11", obj_count="1", namespace="default",  ip_list=[])
+danmnet_pods12 = dict(obj_type="pod", obj_name="danmnet-pods12", obj_count="1", namespace="default",  ip_list=[])
+danmnet_pods13 = dict(obj_type="pod", obj_name="danmnet-pods13", obj_count="1", namespace="default",  ip_list=[])
+danmnet_pods14 = dict(obj_type="pod", obj_name="danmnet-pods14", obj_count="1", namespace="default",  ip_list=[])
+danmnet_pods_all = dict(obj_type="pod", obj_name="danmnet-pods", obj_count="0", namespace="default",    ip_list=[])
+
+php_apache_pod = dict(obj_type="pod", obj_name="php-apache", obj_count="1", namespace="default")
+podinfo_pod = dict(obj_type="pod", obj_name="podinfo", obj_count="2", namespace="kube-system")
+load_generator_for_apache = dict(obj_type="pod", obj_name="load-generator-for-apache", obj_count="1", namespace="default")
+http_traffic_gen = dict(obj_type="pod", obj_name="http-traffic-gen", obj_count="1", namespace="default")
+
+pods_skipped = ['load-generator-for-apache', 'php-apache-deployment', 'pv-test-deployment', 'danmnet-pods',
+                test_chart['kube_objects'][0]['obj_name'], 'registry-update', 'su-test', 'cpu-pooling', 'swift-update',
+                'podinfo', 'tennet-pod']
+
+services_skipped = ['selinux-policy-migrate-local-changes', 'cloud-final.service', 'kdump.service',
+                    'postfix.service']
+
+danmnets_properties = {
+    'd_test-net1':   {'name':"test-net1", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"", 'rt_tables':"201",  'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net2':   {'name':"test-net2", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"vx_test-net2", 'rt_tables':"11", 'routes':"10.0.0.0/32: 10.0.0.50", 'vxlan':"50", 'vlan':""},
+    'ks_test-net2':  {'name':"test-net2", 'Validation':"true",  'NetworkType':"",        'namespace':"kube-system", 'host_if':"vx_test-net2", 'rt_tables':"11", 'routes':"10.1.1.0/32: 10.1.1.1", 'vxlan':"50", 'vlan':""},
+    'd_test-net4':   {'name':"test-net4", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"", 'rt_tables':"13", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net5':   {'name':"test-net5", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"", 'rt_tables':"14", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net6':   {'name':"test-net6", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"vx_test-net6", 'rt_tables':"", 'routes':"", 'vxlan':"52", 'vlan':""},
+    'd_test-net7':   {'name':"test-net7", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"vx_test-net7", 'rt_tables':"15", 'routes':"", 'vxlan':"53", 'vlan':""},
+    'd_test-net8':   {'name':"test-net8", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"vx_test-net8", 'rt_tables':"15", 'routes':"10.10.0.0/32: 10.10.0.1", 'vxlan':"50", 'vlan':""},
+    'd_test-net13':  {'name':"test-net13", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"vx_test-net13", 'rt_tables':"20", 'routes':"", 'vxlan':"56", 'vlan':""},
+    'd_test-net15':  {'name':"test-net15", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"test-net15.1", 'rt_tables':"22", 'routes':"", 'vxlan':"", 'vlan':"1"},
+    'd_test-net16':  {'name':"test-net16", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"test-net16.4094", 'rt_tables':"23", 'routes':"", 'vxlan':"", 'vlan':"4094"},
+    'd_test-net20':  {'name':"test-net20", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"vx_test-net20", 'rt_tables':"27", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net21':  {'name':"test-net21", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"vx_test-net21", 'rt_tables':"28", 'routes':"", 'vxlan':"16777214", 'vlan':""},
+    'd_test-net23':  {'name':"test-net23", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"vx_test-net23", 'rt_tables':"30", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net24':  {'name':"test-net24", 'Validation':"false", 'NetworkType':"flannel", 'namespace':"default",    'host_if':"", 'rt_tables':"31", 'routes':"", 'vxlan':"58", 'vlan':"57"},
+    'd_test-net25':  {'name':"test-net25", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"test-net25.58", 'rt_tables':"10", 'routes':"10.10.0.0/32: 10.10.0.40", 'vxlan':"", 'vlan':"58"},
+    'd_test-net26':  {'name':"test-net26", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"vx_test-net26", 'rt_tables':"10", 'routes':"", 'vxlan':"60", 'vlan':""},
+    'ks_test-net27': {'name':"test-net27", 'Validation':"true",  'NetworkType':"",        'namespace':"kube-system",'host_if':"vx_test-net27", 'rt_tables':"10", 'routes':"", 'vxlan':"61", 'vlan':""},
+    'd_test-net28':  {'name':"test-net28", 'Validation':"true",  'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"33", 'routes':"", 'vxlan':"50", 'vlan':""},
+    'ks_test-net29': {'name':"test-net29", 'Validation':"true",  'NetworkType':"",        'namespace':"kube-system",'host_if':"", 'rt_tables':"34", 'routes':"", 'vxlan':"50", 'vlan':""},
+    'd_test-net30':  {'name':"test-net30", 'Validation':"true",  'NetworkType':"",        'namespace':"default",     'host_if':"", 'rt_tables':"10", 'routes':"10.10.0.0/32: 10.10.0.40", 'vxlan':"", 'vlan':""},
+}
+
+danmnets_error = {
+    'd_test-net3':   {'name':"test-net3", 'Validation':"false",  'NetworkType':"",       'namespace':"default",     'host_if':"", 'rt_tables':"12", 'routes':"", 'vxlan':"51", 'vlan':""},
+    'd_test-net9':   {'name':"test-net9", 'Validation':"false",  'NetworkType':"",       'namespace':"default",     'host_if':"", 'rt_tables':"155", 'routes':"", 'vxlan':"55", 'vlan':""},
+    'd_test-net10':  {'name':"test-net10", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"18", 'routes':"", 'vxlan':"56", 'vlan':""},
+    'd_test-net11':  {'name':"test-net11", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"18", 'routes':"", 'vxlan':"55", 'vlan':""},
+    'd_test-net12':  {'name':"test-net12", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"19", 'routes':"", 'vxlan':"55", 'vlan':""},
+    'd_test-net14':  {'name':"test-net14", 'Validation':"true", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"21", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net17':  {'name':"test-net17", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"24", 'routes':"", 'vxlan':"", 'vlan':"4095"},
+    'd_test-net18':  {'name':"test-net18", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"25", 'routes':"", 'vxlan':"", 'vlan':"4096"},
+    'd_test-net19':  {'name':"test-net19", 'Validation':"true", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"26", 'routes':"", 'vxlan':"", 'vlan':""},
+    'd_test-net22':  {'name':"test-net22", 'Validation':"false", 'NetworkType':"",        'namespace':"default",    'host_if':"", 'rt_tables':"29", 'routes':"", 'vxlan':"16777215", 'vlan':""},
+    }
+
+cpu_pooling_pod1 = dict(obj_type="pod", obj_name="cpu-pooling-1", namespace="default", obj_count="1")
+cpu_pooling_pod2 = dict(obj_type="pod", obj_name="cpu-pooling-2", namespace="default", obj_count="1")
+cpu_pooling_pod3 = dict(obj_type="pod", obj_name="cpu-pooling-3", namespace="default", obj_count="1")
+cpu_pooling_pod4 = dict(obj_type="pod", obj_name="cpu-pooling-4", namespace="default", obj_count="1")
+cpu_pooling_pod5 = dict(obj_type="pod", obj_name="cpu-pooling-5", namespace="default", obj_count="1")
+cpu_pooling_pod6 = dict(obj_type="pod", obj_name="cpu-pooling-6", namespace="default", obj_count="1")
+cpu_pooling_pod7 = dict(obj_type="pod", obj_name="cpu-pooling-7", namespace="default", obj_count="1")
+cpu_pooling_pod8 = dict(obj_type="pod", obj_name="cpu-pooling-8", namespace="default", obj_count="1")
+cpu_pooling_pod9 = dict(obj_type="replicaset", obj_name="cpu-pooling-9", namespace="default", obj_count="1")
+cpu_pooling_pod10 = dict(obj_type="replicaset", obj_name="cpu-pooling-10", namespace="default", obj_count="1")
+cpu_pooling_pod11 = dict(obj_type="replicaset", obj_name="cpu-pooling-11", namespace="default", obj_count="1")
+
+cpu_pooling_setter = dict(obj_type="pod", obj_name="cpu-setter", namespace="kube-system", obj_count="1")
+
+cpu_pooling_cm_name = "cpu-pooler-configmap"
+
+clusternetworks_properties = {
+    'cnet_01': {'name': 'cnet-01', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'ext'},
+    'cnet_02': {'name': 'cnet-02', 'NetworkType': 'ipvlan', 'host_if': 'cnet02.502', 'iface_type': 'ext'},
+    'cnet_03': {'name': 'cnet-03', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet03', 'iface_type': 'int'},
+    'cnet_04': {'name': 'cnet-04', 'NetworkType': 'ipvlan', 'host_if': 'cnet04.504', 'iface_type': 'ext'},
+    'cnet_05': {'name': 'cnet-05', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'ext'},
+    'cnet_06': {'name': 'cnet-06', 'NetworkType': 'ipvlan', 'host_if': 'cnet06.506', 'iface_type': 'ext'},
+    'cnet_07': {'name': 'cnet-07', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': 'int'},
+    'cnet_08': {'name': 'cnet-08', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': ''},
+    'cnet_09': {'name': 'cnet-09', 'NetworkType': 'ipvlan', 'host_if': '', 'iface_type': ''},
+}
+
+clusternetworks_error_properties = {
+    'cnet_invalid_01':    {'name': 'cnet-invalid-01'},
+    'cnet_invalid_02_01': {'name': 'cnet-invalid-02-01'},
+    'cnet_invalid_02_02': {'name': 'cnet-invalid-02-02'},
+    'cnet_invalid_03':    {'name': 'cnet-invalid-03'},
+    'cnet_invalid_04_01': {'name': 'cnet-invalid-04-01'},
+    'cnet_invalid_04_02': {'name': 'cnet-invalid-04-02'},
+    'cnet_invalid_05':    {'name': 'cnet-invalid-05'},
+    'cnet_invalid_06':    {'name': 'cnet-invalid-06'},
+    'cnet_invalid_07':    {'name': 'cnet-invalid-07'},
+    'cnet_invalid_08':    {'name': 'cnet-invalid-08'},
+    'cnet_invalid_09':    {'name': 'cnet-invalid-09'},
+    'cnet_invalid_10':    {'name': 'cnet-invalid-10'},
+    'cnet_invalid_11':    {'name': 'cnet-invalid-11'},
+    'cnet_invalid_12':    {'name': 'cnet-invalid-12'},
+}
+
+tenantconfig_properties = {
+    'tconf_01': {'name': "tconf-01"},
+    'tconf_02': {'name': "tconf-02"},
+    'tconf_03': {'name': "tconf-03"},
+    'tconf_04': {'name': "tconf-04"},
+    'tconf_05': {'name': "tconf-05"},
+    'tconf_06': {'name': "tconf-06"},
+    'tconf_07': {'name': "tconf-07"},
+    'tconf_08': {'name': "tconf-08"},
+}
+
+tenantconfig_error_properties = {
+    'tconf_invalid_01': {'name':"tconf-invalid-01"},
+    'tconf_invalid_02': {'name':"tconf-invalid-02"},
+    'tconf_invalid_03': {'name':"tconf-invalid-03"},
+    'tconf_invalid_04': {'name':"tconf-invalid-04"},
+    'tconf_invalid_05': {'name':"tconf-invalid-05"},
+    'tconf_invalid_06': {'name':"tconf-invalid-06"},
+    'tconf_invalid_07': {'name':"tconf-invalid-07"},
+    'tconf_invalid_08': {'name':"tconf-invalid-08"},
+    'tconf_invalid_09': {'name':"tconf-invalid-09"},
+}
+
+tenantnetwork_properties = {
+    'tennet_01': {'name': "tennet-01", 'NetworkType': 'ipvlan', 'host_if': 'vx_tnet',     'iface_type': 'ext'},
+    'tennet_02': {'name': "tennet-02", 'NetworkType': 'ipvlan', 'host_if': 'tnet02.1000', 'iface_type': 'int'},
+    'tennet_03': {'name': "tennet-03", 'NetworkType': 'ipvlan', 'host_if': 'tnet03.1001', 'iface_type': 'int'},
+    'tennet_04': {'name': "tennet-04", 'NetworkType': 'ipvlan', 'host_if': 'tnet04.2000', 'iface_type': 'storage'},
+    'tennet_05': {'name': "tennet-05", 'NetworkType': 'ipvlan', 'host_if': 'tnet05.1002', 'iface_type': 'int'},
+    'tennet_06': {'name': "tennet-06", 'NetworkType': 'ipvlan', 'host_if': 'tnet06.1003', 'iface_type': 'int'},
+}
+
+tenantnetwork_error_properties = {
+    'tennet_invalid_01':     {'name': 'tennet-invalid-01'},
+    'tennet_invalid_02':     {'name': 'tennet-invalid-02'},
+    'tennet_invalid_03_01':  {'name': 'tennet-invalid-03-01'},
+    'tennet_invalid_03_02':  {'name': 'tennet-invalid-03-02'},
+    'tennet_invalid_04_01':  {'name': 'tennet-invalid-04-01'},
+    'tennet_invalid_04_02':  {'name': 'tennet-invalid-04-02'},
+    'tennet_invalid_05':     {'name': 'tennet-invalid-05'},
+    'tennet_invalid_06':     {'name': 'tennet-invalid-06'},
+    'tennet_invalid_07_01':  {'name': 'tennet-invalid-07-01'},
+    'tennet_invalid_07_02':  {'name': 'tennet-invalid-07-02'},
+    'tennet_invalid_08':     {'name': 'tennet-invalid-08'},
+    'tennet_invalid_09':     {'name': 'tennet-invalid-09'},
+    'tennet_invalid_10':     {'name': 'tennet-invalid-10'},
+    'tennet_invalid_11':     {'name': 'tennet-invalid-11'},
+}
+
+network_attach_properties = {
+    'cnet_pod1': {'name': 'cnet-pod1', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod1', 'routes':"10.0.0.0/32: 10.5.1.1"},
+    'cnet_pod2': {'name': 'cnet-pod2', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod2'},
+    'cnet_pod3': {'name': 'cnet-pod3', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod3'},
+    'cnet_pod4': {'name': 'cnet-pod4', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod4'},
+    'cnet_pod5': {'name': 'cnet-pod5', 'NetworkType': 'ipvlan', 'host_if': ''},
+    'cnet_pod6': {'name': 'cnet-pod6', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod6'},
+    'cnet_pod7': {'name': 'cnet-pod7', 'NetworkType': 'ipvlan', 'host_if': 'vx_cnet-pod7'},
+}
+
+tenantnetwork_attach_properties = {
+    'tennet_attach_01': {'name': 'tennet-attach-01', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': '', 'routes': "10.10.1.0/24: 10.240.1.100"},
+    'tennet_attach_02': {'name': 'tennet-attach-02', 'namespace': 'kube-system', 'NetworkType': 'ipvlan', 'host_if': '', 'routes':"10.10.2.0/24: 10.240.2.1"},
+    'tennet_attach_03': {'name': 'tennet-attach-03', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': ''},
+    'tennet_attach_04': {'name': 'tennet-attach-04', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': '', 'flannel_pool': {'start': '10.244.0.1', 'end': '10.244.255.254'}},
+    'tennet_attach_05': {'name': 'tennet-attach-05', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': ''},
+    'tennet_attach_06': {'name': 'tennet-attach-06', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': ''},
+    'tennet_attach_07': {'name': 'tennet-attach-07', 'namespace': 'default',     'NetworkType': 'ipvlan', 'host_if': ''},
+}
+
+
+tennet_pod1  = dict(obj_type="pod", obj_name="tennet-pod-01", obj_count="4", namespace="default",     ip_list=["10.240.1.1", "10.240.1.8", "10.240.1.9", "10.240.1.254"])
+tennet_pod2  = dict(obj_type="pod", obj_name="tennet-pod-02", obj_count="4", namespace="default",     ip_list=["10.240.1.2", "10.240.1.3", "10.240.1.4", "10.240.1.5", "10.240.1.6", "10.240.1.7"])
+tennet_pod3  = dict(obj_type="pod", obj_name="tennet-pod-03", obj_count="4", namespace="default",     ip_list=["10.240.1.1", "10.240.1.8", "10.240.1.9", "10.240.1.254"])
+tennet_pod4  = dict(obj_type="pod", obj_name="tennet-pod-04", obj_count="5", namespace="kube-system", ip_list=["10.240.2.2", "10.240.2.3", "10.240.2.4", "10.240.2.5", "10.240.2.6"])
+tennet_pod5  = dict(obj_type="pod", obj_name="tennet-pod-05", obj_count="1", namespace="kube-system", ip_list=[])
+tennet_pod6  = dict(obj_type="pod", obj_name="tennet-pod-06", obj_count="4", namespace="default",     ip_list=[])
+tennet_pod7  = dict(obj_type="pod", obj_name="tennet-pod-07", obj_count="5", namespace="default",     ip_list=[])
+tennet_pod8  = dict(obj_type="pod", obj_name="tennet-pod-08", obj_count="1", namespace="default",     ip_list=[])
+tennet_pod9  = dict(obj_type="pod", obj_name="tennet-pod-09", obj_count="2", namespace="default",     ip_list=[])
+tennet_pod10 = dict(obj_type="pod", obj_name="tennet-pod-10", obj_count="1", namespace="default",     ip_list=[])
+tennet_pod11 = dict(obj_type="pod", obj_name="tennet-pod-11", obj_count="1", namespace="default",     ip_list=[])
+tennet_pod12 = dict(obj_type="pod", obj_name="tennet-pod-12", obj_count="1", namespace="default",     ip_list=["10.20.5.101", "10.240.1.1"])
+tennet_pod13 = dict(obj_type="pod", obj_name="tennet-pod-13", obj_count="1", namespace="default",     ip_list=[])
+tennet_pod14 = dict(obj_type="pod", obj_name="tennet-pod-14", obj_count="1", namespace="default",     ip_list=["10.20.6.10", "10.240.1.5", "10.20.5.100"])
diff --git a/libraries/common/users.py b/libraries/common/users.py
new file mode 100644 (file)
index 0000000..421b8cc
--- /dev/null
@@ -0,0 +1,2 @@
+root = {'username': 'root', 'password': ''}
+cloudadmin = {'username': 'cloudadmin', 'password': 'letmeinCRF1234'}
index fd68abd..4ed172c 100644 (file)
@@ -1,3 +1,6 @@
 crl.remotesession
 crl.rfcli>=1.0
 pyyaml
+ruamel.yaml==0.15.96
+netaddr==0.7.19
+requests==2.22.0
index a1f9c70..1bf23cf 100644 (file)
@@ -19,3 +19,11 @@ PyNaCl==1.3.0
 PyYAML==5.1
 robotframework==3.1.1
 six==1.12.0
+ruamel.yaml==0.15.96
+netaddr==0.7.19
+requests==2.22.0
+certifi==2019.11.28
+chardet==3.0.4
+idna==2.8
+ruamel.ordereddict==0.4.14
+urllib3==1.25.7
\ No newline at end of file
diff --git a/resources/robot_container/Dockerfile b/resources/robot_container/Dockerfile
new file mode 100644 (file)
index 0000000..c32f5b8
--- /dev/null
@@ -0,0 +1,33 @@
+FROM centos:7.6.1810
+MAINTAINER Balint Tobik <balint.tobik@nokia.com> Endre Nemeth <endre.nemeth@nokia.com>
+
+RUN yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
+    && yum -y --setopt=skip_missing_names_on_install=False install \
+      gcc \
+      python-devel \
+      python2-pip \
+      openssh-clients \
+      jq \
+    && yum clean all
+
+RUN pip install --upgrade pip \
+    && pip install --upgrade setuptools \
+    && pip install PyYAML \
+    && pip install ruamel.yaml \
+    && pip install positional \
+    && pip install pyparsing \
+    && pip install netaddr \
+    && pip install netifaces \
+    && pip install robotframework==3.0.4 \
+    && pip install robotframework-sshlibrary \
+    && pip install paramiko==2.4.2 \
+    && pip install requests \
+    && pip install robotframework-pabot==0.44
+
+
+COPY robot-deployment-test.sh /robot-deployment-test.sh
+COPY testcases/ /cloudtaf/testcases/
+COPY libraries/ /cloudtaf/libraries/
+COPY resources/ /cloudtaf/resources/
+
+ENTRYPOINT /robot-deployment-test.sh
diff --git a/resources/robot_container/README.rst b/resources/robot_container/README.rst
new file mode 100644 (file)
index 0000000..e7e27c1
--- /dev/null
@@ -0,0 +1,47 @@
+Introduction
+------------
+
+Containerized test environment to run functional tests on CAAS deployments. Test cases are written in Python and
+executed with Robot Framework, using the Pabot parallel executor.
+
+The container contains the test resources (helm charts and container images of the test applications), the test
+cases and the scripts needed for the execution.
+
+
+Environment setup
+-----------------
+
+A few config parameters have to be set in resources/scripts/include/robot_container.env:
+
+| ROBOT_CONTAINER_TAG: tag of the executor container
+| TC_TAG: only test cases tagged with this value are executed
+| SUT_IP: controller-1 node IP of the deployment
+| SKIP_BM_ONBOARD: if true, onboarding of the test applications is skipped (useful for re-execution)
+| PASSWORD: password of the cloudadmin user
+
+These parameters have to be set only for manual execution; otherwise they are set by the Jenkins test job
+(currently http://jenkins2.mtlab.att-akraino.org/job/Test_cloudtaf_modifications/).
+
+
+Building the environment
+------------------------
+
+The resources/scripts/build-test-containers.sh script builds the test containers located in the
+resources/test_containers/ folder.
+
+The resources/scripts/robot-test-build.sh script builds the robot executor container.
+
+
+Executing tests
+---------------
+
+The resources/scripts/robot-test-run.sh script starts the robot container. Its entrypoint, the
+robot-deployment-test.sh script, performs the onboarding of the test applications and executes the test suites
+from the testcases/parallel_suites/ folder in parallel.
+The robot logs will be available in the pabot_logs folder.
+
+Test cases can also be executed one by one, with a command like
+python -m robot -t "CAAS_BASIC_FUNC_002" --variable floating_ip:<SUT_IP> --loglevel trace ssh_check.robot
+In this case please check the packages installed in the Dockerfile.
+
+Another option is to start the executor container based on the resources/scripts/robot-test-run.sh script,
+overriding the entrypoint with the --entrypoint=/bin/bash option.
\ No newline at end of file
diff --git a/resources/robot_container/robot-deployment-test.sh b/resources/robot_container/robot-deployment-test.sh
new file mode 100755 (executable)
index 0000000..570a33d
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/bash -xe
+
+function execute_test_suites {
+  IP="$1"
+  suite_count=`ls ${WORKDIR}/testcases/parallel_suites/ | grep -c .robot`
+  set +e
+  mkdir -p ~/.ssh
+  touch ~/.ssh/known_hosts
+  ssh-keygen -R ${SUT_IP} -f ~/.ssh/known_hosts
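+  # Derive a per-build PabotLib port so parallel Jenkins jobs on the same host do not collide.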
+  PABOT_PORT=$((20000 + ${BUILD_NUMBER}))
+  pabot --verbose --processes ${suite_count} --pabotlib --pabotlibport ${PABOT_PORT} -d ${WORKDIR}/pabot_logs/ -i ${TC_TAG} --variable floating_ip:${SUT_IP} --loglevel trace ${WORKDIR}/testcases/parallel_suites
+
+  set -e
+}
+
+function replace_password {
+  if [[ -n "${PASSWORD}" ]] && [[ "${PASSWORD}" != "adminpass" ]]
+  then
+    sed -i "s|cloudadmin = {'username': 'cloudadmin', 'password': 'adminpass'}|cloudadmin = {'username': 'cloudadmin', 'password': '${PASSWORD}'}|g" ${WORKDIR}/libraries/common/users.py
+  fi
+}
+
+. ${WORKDIR}/resources/scripts/include/crf-registry
+if [[ -n "${SKIP_BM_ONBOARD}" ]] && [[ "${SKIP_BM_ONBOARD}" != "true" ]]
+then
+  ${WORKDIR}/resources/scripts/prepare_robot_bm.py
+fi
+
+replace_password
+execute_test_suites ${SUT_IP}
+echo "end of robot-deployment-test.sh script"
+
+exit 0
diff --git a/resources/scripts/build-test-containers.sh b/resources/scripts/build-test-containers.sh
new file mode 100755 (executable)
index 0000000..a6b3a5b
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/bash -ex
+
+. include/crf-registry
+test_containers=`ls ../test_containers/`
+for val in $test_containers
+do
+    echo "### Building $val test container"
+
+    docker build --network=host --no-cache --build-arg HTTP_PROXY="${http_proxy}" --build-arg HTTPS_PROXY="${https_proxy}" --build-arg NO_PROXY="${no_proxy}" --build-arg http_proxy="${http_proxy}" --build-arg https_proxy="${https_proxy}" --build-arg no_proxy="${no_proxy}" --tag ${REG}:${REG_PORT}/${REG_PATH}/${val}:latest ../test_containers/${val}/
+
+    docker save ${REG}:${REG_PORT}/${REG_PATH}/${val}:latest -o ../test_containers/${val}.tar
+
+    echo "${val} test container is saved to ../test_containers/${val}.tar"
+done
+
+echo ">> Done"
diff --git a/resources/scripts/include/crf-registry b/resources/scripts/include/crf-registry
new file mode 100755 (executable)
index 0000000..e479c4d
--- /dev/null
@@ -0,0 +1,3 @@
+export REG=registry.kube-system.svc.rec.io
+export REG_PORT=5555
+export REG_PATH=caas
diff --git a/resources/scripts/include/robot_container.env b/resources/scripts/include/robot_container.env
new file mode 100644 (file)
index 0000000..79d654f
--- /dev/null
@@ -0,0 +1,6 @@
+ROBOT_CONTAINER_TAG=cloudtaf_robot:latest
+TC_TAG=CI
+SUT_IP=10.88.154.4
+BUILD_NUMBER=42
+SKIP_BM_ONBOARD=false
+PASSWORD=letmeinCRF1234
\ No newline at end of file
diff --git a/resources/scripts/prepare_robot_bm.py b/resources/scripts/prepare_robot_bm.py
new file mode 100755 (executable)
index 0000000..e8876f7
--- /dev/null
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+
+import paramiko
+import os
+import sys
+
+WORK_DIR = os.getenv('WORKDIR')
+sys.path.append(os.path.join(WORK_DIR, 'libraries', 'common'))
+from users import *  # noqa
+
+REG = os.getenv('REG')
+REG_PORT = os.getenv('REG_PORT')
+REG_PATH = os.getenv('REG_PATH')
+
+
+IP = os.getenv('SUT_IP')
+CONTAINERS_DIR = os.path.join(WORK_DIR, 'resources', 'test_containers')
+CHARTS_DIR = os.path.join(WORK_DIR, 'resources', 'test_charts')
+
+
+def open_connection(host, user, password):
+    print"Open paramiko connection to {} with user: {} pass: {}".format(host, user, password)
+    client = paramiko.SSHClient()
+    client.load_system_host_keys()
+    client.set_missing_host_key_policy(paramiko.AutoAddPolicy)
+    client.connect(host, username=user, password=password)
+    return client
+
+
+def create_remote_dir(client, remote_dir):
+    execute_command(client, "rm -rf {}".format(remote_dir))
+    execute_command(client, "mkdir -p {}".format(remote_dir))
+
+
+def delete_remote_dir(client, remote_dir):
+    execute_command(client, "rm -rf {}".format(remote_dir))
+
+
+def execute_command(client, command):
+    _, stdout, stderr = client.exec_command(command)
+    print"The following command executed on remote: {}".format(command)
+    stdout = stdout.read()
+    print('stdout:', stdout)
+    err = stderr.read()
+    if err:
+        raise Exception("The following error occured: {}".format(err))
+    else:
+        return stdout
+
+
+def get_all_files_in_local_dir(local_dir, extension=""):
+    all_files = list()
+    if os.path.exists(local_dir):
+        files = os.listdir(local_dir)
+        for f in files:
+            _, ext = os.path.splitext(f)
+            if extension in ext:
+                filepath = os.path.join(local_dir, f)
+                print "filename:" + filepath
+                if os.path.isdir(filepath):
+                    all_files.extend(get_all_files_in_local_dir(filepath))
+                else:
+                    all_files.append(filepath)
+    else:
+        print '{} folder does not exist'.format(local_dir)
+    return all_files
+
+
+def upload_resources(client, local, remote):
+    sftp = client.open_sftp()
+    for f in local:
+        remote_path = os.path.join("{}{}".format(remote, f.split(remote.split('/')[-1])[1]))
+        remote_dir = remote_path.rsplit('/', 1)[0]
+        execute_command(client, "mkdir -p {}".format(remote_dir))
+        sftp.put(f, remote_path)
+    print"Upload {} from robot container to the SUT {}".format(local, remote)
+
+
+def load_docker_images_from_directory(client, remote_dir):
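+    # Load every saved image archive into the docker daemon on the node and push it to the registry.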
+    command = "ls {}".format(remote_dir)
+    docker_images = execute_command(client, command).splitlines()
+    for image in docker_images:
+        command = "docker load -i {}/{}".format(remote_dir, image)
+        execute_command(client, command)
+        image_name = image.rsplit('.tar')[0]
+        print image_name
+        command = "docker push {}:{}/{}/{}".format(REG, REG_PORT, REG_PATH, image_name)
+        execute_command(client, command)
+
+
+def create_helm_packages(client, remote_dir):
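+    # Package each chart on the node and upload the tarball to the chart repo taken from 'helm repo list'.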
+    command = "helm repo list"
+    stdout = execute_command(client, command)
+    chart_repo = stdout.splitlines()[1].split()[1]
+    command = "ls {}".format(remote_dir)
+    helm_charts = execute_command(client, command).splitlines()
+    for chart in helm_charts:
+        command = "helm package {}/{}".format(remote_dir, chart)
+        helm_package_path = execute_command(client, command)
+        helm_package = helm_package_path.split(cloudadmin['username'] + '/')[1].rstrip()
+        print helm_package
+        command = "curl -sS -XPOST -H 'Content-Type: application/gzip' --data-binary @{} {}/charts/{}".format(
+            helm_package, chart_repo, helm_package)
+        execute_command(client, command)
+        command = "rm -f {}".format(helm_package_path)
+        execute_command(client, command)
+    command = "helm repo update"
+    execute_command(client, command)
+
+
+def main():
+
+    paramiko_client = open_connection(IP, cloudadmin['username'], cloudadmin['password'])
+    remote_containers_dir = os.path.join("/home/{}/resources/test_containers".format(cloudadmin['username']))
+    container_images = get_all_files_in_local_dir(CONTAINERS_DIR, "tar")
+    remote_test_charts_dir = os.path.join("/home/{}/resources/test_charts".format(cloudadmin['username']))
+    test_charts = get_all_files_in_local_dir(CHARTS_DIR)
+
+    try:
+        create_remote_dir(paramiko_client, remote_containers_dir)
+        create_remote_dir(paramiko_client, remote_test_charts_dir)
+        upload_resources(paramiko_client, container_images, remote_containers_dir)
+        upload_resources(paramiko_client, test_charts, remote_test_charts_dir)
+        load_docker_images_from_directory(paramiko_client, remote_containers_dir)
+        create_helm_packages(paramiko_client, remote_test_charts_dir)
+        delete_remote_dir(paramiko_client, remote_test_charts_dir)
+        delete_remote_dir(paramiko_client, remote_containers_dir)
+
+    finally:
+        paramiko_client.close()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/resources/scripts/robot-test-build.sh b/resources/scripts/robot-test-build.sh
new file mode 100755 (executable)
index 0000000..54e72a4
--- /dev/null
@@ -0,0 +1,18 @@
+#!/bin/bash -ex
+
+env_file=${1:-include/robot_container.env}
+source ${env_file}
+ROBOT_CONTAINER_PATH=../robot_container/
+
+yes | cp -rf ../../libraries/ ${ROBOT_CONTAINER_PATH}
+yes | cp -rf ../../testcases/ ${ROBOT_CONTAINER_PATH}
+mkdir -p ${ROBOT_CONTAINER_PATH}resources/
+yes | cp -rf ../../resources/scripts/ ${ROBOT_CONTAINER_PATH}resources/
+yes | cp -rf ../../resources/test_charts/ ${ROBOT_CONTAINER_PATH}resources/
+yes | cp -rf ../../resources/test_containers/ ${ROBOT_CONTAINER_PATH}resources/
+
+docker build --network=host --no-cache --build-arg HTTP_PROXY="${http_proxy}" --build-arg HTTPS_PROXY="${https_proxy}" --build-arg NO_PROXY="${no_proxy}" --build-arg http_proxy="${http_proxy}" --build-arg https_proxy="${https_proxy}" --build-arg no_proxy="${no_proxy}" --tag ${ROBOT_CONTAINER_TAG} ${ROBOT_CONTAINER_PATH}
+
+rm -rf ${ROBOT_CONTAINER_PATH}/libraries/ ${ROBOT_CONTAINER_PATH}/testcases/ ${ROBOT_CONTAINER_PATH}/resources/
+  
+echo ">> Done"
diff --git a/resources/scripts/robot-test-run.sh b/resources/scripts/robot-test-run.sh
new file mode 100755 (executable)
index 0000000..1b13fc7
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/bash -ex
+env_file=${1:-include/robot_container.env}
+
+source ${env_file}
+
+export WORKDIR=/cloudtaf
+export WORKSPACE=$PWD/../../
+mkdir -p ${WORKSPACE}/pabot_logs
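+# pabot_logs is bind-mounted from the workspace so the robot logs survive the container's --rm cleanup.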
+
+echo ">> Run robot-container"
+docker run \
+  -i \
+  --rm \
+  --net=host \
+  --pid=host \
+  --name robot-test-${BUILD_NUMBER} \
+  -e TC_TAG=${TC_TAG} \
+  -e SUT_IP=${SUT_IP} \
+  -e BUILD_NUMBER=${BUILD_NUMBER} \
+  -e PASSWORD=${PASSWORD} \
+  -e SKIP_BM_ONBOARD=${SKIP_BM_ONBOARD} \
+  -e WORKDIR=${WORKDIR} \
+  -v ${WORKSPACE}/pabot_logs:${WORKDIR}/pabot_logs \
+  -w ${WORKDIR} \
+  ${ROBOT_CONTAINER_TAG}
+
+echo ">> Done"
diff --git a/resources/test_charts/busybox3/Chart.yaml b/resources/test_charts/busybox3/Chart.yaml
new file mode 100644 (file)
index 0000000..dd4c9b2
--- /dev/null
@@ -0,0 +1,3 @@
+name: busybox3
+version: 3.3.3 
+description: busybox helm try
diff --git a/resources/test_charts/busybox3/templates/busybox.yaml b/resources/test_charts/busybox3/templates/busybox.yaml
new file mode 100644 (file)
index 0000000..d47327a
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: busybox3
+  namespace: kube-system
+spec:
+  containers:
+  - name: busybox
+    image: {{ .Values.registry_url }}:5555/caas/busybox:latest
+    args:
+    - sleep
+    - "1000"
diff --git a/resources/test_charts/clusternetwork-test-error/Chart.yaml b/resources/test_charts/clusternetwork-test-error/Chart.yaml
new file mode 100644 (file)
index 0000000..5125d2d
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for invalid clusternetwork validation tests
+name: clusternetwork-test-error
+version: 1.0.0
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_01.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_01.yaml
new file mode 100644 (file)
index 0000000..8b0af02
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-01
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 1000.100.1.0/24
+    allocation_pool:
+      start: 10.100.1.100
+      end: 10.100.1.200 
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_01.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_01.yaml
new file mode 100644 (file)
index 0000000..79f66aa
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-02-01
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.100.2.0/24
+    allocation_pool:
+      start: 10.100.2.100
+      end: 10.100.2.200
+    routes:
+      10.20.0.0/24: 10.100.2.99.1
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_02.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_02_02.yaml
new file mode 100644 (file)
index 0000000..40f1998
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-02-02
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.100.2.0/24
+    allocation_pool:
+      start: 10.100.2.100
+      end: 10.100.2.200
+    routes:
+      10.20.0.0/24: 10.100.3.201
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_03.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_03.yaml
new file mode 100644 (file)
index 0000000..144afce
--- /dev/null
@@ -0,0 +1,13 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-03
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    net6: fffff:0:0:0:0:ffff:0f00:0000/120
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_01.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_01.yaml
new file mode 100644 (file)
index 0000000..9c55980
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-04-01
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    net6: 2001:db8::/45
+    routes6:
+      2000:db8::/45: 2001:0db7:ffff:ffff:ffff:ffff:ffff:ffff
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_02.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_04_02.yaml
new file mode 100644 (file)
index 0000000..ba21c49
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-04-02
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    net6: 2001:db8::/45
+    routes6:
+      2000:db8::/45: "2001:0db8:0008::"
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_05.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_05.yaml
new file mode 100644 (file)
index 0000000..684f06f
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-05
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    allocation_pool:
+      start: 10.100.5.100
+      end: 10.100.5.200 
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_06.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_06.yaml
new file mode 100644 (file)
index 0000000..803df36
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-06
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.100.6.0/16
+    allocation_pool:
+      start: 10.100.6.100
+      end: 10.100.5.101
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_07.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_07.yaml
new file mode 100644 (file)
index 0000000..2458092
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-07
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    vxlan: 1400
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 100.7.0.0/16
+    allocation_pool:
+      start: 100.7.255.0
+      end: 100.7.255.255
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_08.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_08.yaml
new file mode 100644 (file)
index 0000000..5d3be62
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-08
+spec:
+  NetworkID: long_cnet-08
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 100.100.8.0/24
+    allocation_pool:
+      start: 100.100.8.0
+      end: 100.100.8.255 
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_09.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_09.yaml
new file mode 100644 (file)
index 0000000..50a5103
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-09
+spec:
+  NetworkID: external
+  NetworkType: sriov
+  Options:
+    host_device: ens3
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 200.0.9.0/16
+    allocation_pool:
+      start: 200.0.9.0
+      end: 200.0.9.255 
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_10.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_10.yaml
new file mode 100644 (file)
index 0000000..fb0ffeb
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-10
+spec:
+  NetworkID: external
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.100.6.0/24
+    allocation_pool:
+      start: 10.100.5.100
+      end: 10.100.5.150
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_11.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_11.yaml
new file mode 100644 (file)
index 0000000..7d3ba23
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-11
+spec:
+  NetworkID: sriovinv
+  NetworkType: sriov
+  Options:
+    host_device: dummyjoska
+    device_pool: "nokia.k8s.io/sriov_ens3" 
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 200.0.11.0/16
+    allocation_pool:
+      start: 200.0.11.0
+      end: 200.0.11.255
diff --git a/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_12.yaml b/resources/test_charts/clusternetwork-test-error/templates/cnet_invalid_12.yaml
new file mode 100644 (file)
index 0000000..2c07b65
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-invalid-12
+spec:
+  NetworkID: external
+  NetworkType: macvlan
+  Options:
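+    # presumed invalid case: Alloc is maintained by DANM and must not be set by hand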
+    Alloc: gAAAAAAAAAAAAAAAAAAAA=
+    host_device: ens3  
+    vlan: 500
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 200.0.11.0/16
+    allocation_pool:
+      start: 200.0.11.0
+      end: 200.0.11.255 
diff --git a/resources/test_charts/clusternetwork-test/Chart.yaml b/resources/test_charts/clusternetwork-test/Chart.yaml
new file mode 100644 (file)
index 0000000..24d8b14
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for valid clusternetwork validation tests
+name: clusternetwork-test
+version: 1.0.0 
\ No newline at end of file
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_01.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_01.yaml
new file mode 100644 (file)
index 0000000..39f9c57
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-01
+spec:
+  NetworkID: cnet01
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}  
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.0.0.0/24
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_02.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_02.yaml
new file mode 100644 (file)
index 0000000..06f39ad
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-02
+spec:
+  NetworkID: cnet02
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}  
+    vlan: 502
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.2.0.0/24
+    routes:
+      10.2.0.0/24: 10.2.0.254
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_03.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_03.yaml
new file mode 100644 (file)
index 0000000..4fbdf67
--- /dev/null
@@ -0,0 +1,14 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-03
+spec:
+  NetworkID: cnet03
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}  
+    vxlan: 503
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.3.0.0/16
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_04.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_04.yaml
new file mode 100644 (file)
index 0000000..95c2504
--- /dev/null
@@ -0,0 +1,19 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-04
+spec:
+  NetworkID: cnet04
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}  
+    vlan: 504
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.4.1.0/24
+    net6: 2001:db8::/45
+    routes:
+      10.4.1.0/24: 10.4.1.1
+    routes6:
+      2001:db8::/45: 2001:db8:1::1
+
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_05.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_05.yaml
new file mode 100644 (file)
index 0000000..0340348
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-05
+spec:
+  NetworkID: cnet05
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}  
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.5.1.0/16
+    allocation_pool:
+      start: 10.5.1.100
+      end: 10.5.2.200
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_06.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_06.yaml
new file mode 100644 (file)
index 0000000..5138504
--- /dev/null
@@ -0,0 +1,21 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-06
+spec:
+  NetworkID: cnet06
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}  
+    vlan: 506
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.6.1.0/24
+    net6: 2001:db8::/43
+    allocation_pool:
+      start: 10.6.1.2
+      end: 10.6.1.200
+    routes:
+      10.6.1.0/24: 10.6.1.1
+    routes6:
+      2001:db8::/43: 2001:db8:1::1
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_07.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_07.yaml
new file mode 100644 (file)
index 0000000..79b23be
--- /dev/null
@@ -0,0 +1,23 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-07
+spec:
+  NetworkID: cnet07
+  NetworkType: ipvlan
+  AllowedTenants:
+    - kube-system
+    - kube-public
+  Options:
+    host_device: {{ .Values.infra_int_if }}  
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.7.1.0/24
+    net6: 2001:db8::/43
+    allocation_pool:
+      start: 10.7.1.1
+      end: 10.7.1.200
+    routes:
+      10.7.1.0/24: 10.7.1.1
+    routes6:
+      2001:db8::/44: 2001:db8:1::1
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_08.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_08.yaml
new file mode 100644 (file)
index 0000000..09b8fbd
--- /dev/null
@@ -0,0 +1,15 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-08
+spec:
+  NetworkID: cnet08
+  NetworkType: sriov
+  Options:
+    device_pool: "nokia.k8s.io/sriov_ens3"
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 10.8.1.0/24
+    allocation_pool:
+      start: 10.8.1.100
+      end: 10.8.1.200
diff --git a/resources/test_charts/clusternetwork-test/templates/cnet_09.yaml b/resources/test_charts/clusternetwork-test/templates/cnet_09.yaml
new file mode 100644 (file)
index 0000000..1a19003
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-09
+spec:
+  NetworkID: cnet09
+  NetworkType: macvlan
+  Options:
+    device_pool: "nokia.k8s.io/sriov_ens3"
+    vlan: 509
+    container_prefix: ext
+    rt_tables: 100
+    cidr: 200.0.10.0/16
+    allocation_pool:
+      start: 200.0.10.0
+      end: 200.0.10.255 
diff --git a/resources/test_charts/cpu-pooling-annotation1/Chart.yaml b/resources/test_charts/cpu-pooling-annotation1/Chart.yaml
new file mode 100644 (file)
index 0000000..a4d2fe3
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-annotation1
+version: 2.0.0
+description: pods for cpu-pooling-annotation1 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation1/templates/cpupooling-deployment9.yaml b/resources/test_charts/cpu-pooling-annotation1/templates/cpupooling-deployment9.yaml
new file mode 100644 (file)
index 0000000..30104c6
--- /dev/null
@@ -0,0 +1,33 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-9
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
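+        # presumed negative case: exclusive pool annotation with no matching exclusive_caas resource request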
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation1/values.yaml b/resources/test_charts/cpu-pooling-annotation1/values.yaml
new file mode 100644 (file)
index 0000000..00f09b6
--- /dev/null
@@ -0,0 +1,3 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation2/Chart.yaml b/resources/test_charts/cpu-pooling-annotation2/Chart.yaml
new file mode 100644 (file)
index 0000000..9709b83
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-annotation2
+version: 2.0.0
+description: pods for cpu-pooling-annotation2 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation2/templates/cpupooling-deployment10.yaml b/resources/test_charts/cpu-pooling-annotation2/templates/cpupooling-deployment10.yaml
new file mode 100644 (file)
index 0000000..e492946
--- /dev/null
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-10
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
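+        # presumed negative case: the "container" field is missing from the annotation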
+        nokia.k8s.io/cpus: |
+          [{
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
diff --git a/resources/test_charts/cpu-pooling-annotation2/values.yaml b/resources/test_charts/cpu-pooling-annotation2/values.yaml
new file mode 100644 (file)
index 0000000..00f09b6
--- /dev/null
@@ -0,0 +1,3 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation3/Chart.yaml b/resources/test_charts/cpu-pooling-annotation3/Chart.yaml
new file mode 100644 (file)
index 0000000..5ad22e7
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-annotation3
+version: 2.0.0
+description: pods for cpu-pooling-annotation3 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-annotation3/templates/cpupooling-deployment11.yaml b/resources/test_charts/cpu-pooling-annotation3/templates/cpupooling-deployment11.yaml
new file mode 100644 (file)
index 0000000..36e26b8
--- /dev/null
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-11
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
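+        # presumed negative case: the process entry has no "cpus" field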
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas"
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
diff --git a/resources/test_charts/cpu-pooling-annotation3/values.yaml b/resources/test_charts/cpu-pooling-annotation3/values.yaml
new file mode 100644 (file)
index 0000000..00f09b6
--- /dev/null
@@ -0,0 +1,3 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-default1/Chart.yaml b/resources/test_charts/cpu-pooling-default1/Chart.yaml
new file mode 100644 (file)
index 0000000..e224523
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-default1
+version: 2.0.0
+description: pod for cpu-pooling-default1 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-default1/templates/cpupooling-deployment7.yaml b/resources/test_charts/cpu-pooling-default1/templates/cpupooling-deployment7.yaml
new file mode 100644 (file)
index 0000000..545d3ee
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-7
+  namespace: default
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "1000"]
+        resources:
+          requests:
+            memory: {{ .Values.mem_request }}
+            cpu: {{ .Values.cpu_request }}
+          limits:
+            memory: {{ .Values.mem_limit }}
+            cpu: {{ .Values.cpu_limit }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-default1/values.yaml b/resources/test_charts/cpu-pooling-default1/values.yaml
new file mode 100644 (file)
index 0000000..b7ffc39
--- /dev/null
@@ -0,0 +1,8 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
+mem_request: "1Gi"
+cpu_request: "500m"
+mem_limit: "1Gi"
+cpu_limit: "500m"
+nodename: caas_master1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-default2/Chart.yaml b/resources/test_charts/cpu-pooling-default2/Chart.yaml
new file mode 100644 (file)
index 0000000..f324e21
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-default2
+version: 2.0.0
+description: pod for cpu-pooling-default2 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-default2/templates/cpupooling-deployment8.yaml b/resources/test_charts/cpu-pooling-default2/templates/cpupooling-deployment8.yaml
new file mode 100644 (file)
index 0000000..7541e36
--- /dev/null
@@ -0,0 +1,38 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-8
+  namespace: default
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "default",
+                "cpus": {{ .Values.proc_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/default: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/default: {{ .Values.pool_req }}
diff --git a/resources/test_charts/cpu-pooling-default2/values.yaml b/resources/test_charts/cpu-pooling-default2/values.yaml
new file mode 100644 (file)
index 0000000..00f09b6
--- /dev/null
@@ -0,0 +1,3 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive1/Chart.yaml b/resources/test_charts/cpu-pooling-exclusive1/Chart.yaml
new file mode 100644 (file)
index 0000000..e23fef2
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-exclusive1
+version: 2.0.0
+description: pods for cpu-pooling-exclusive1 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive1/templates/cpupooling-deployment1.yaml b/resources/test_charts/cpu-pooling-exclusive1/templates/cpupooling-deployment1.yaml
new file mode 100644 (file)
index 0000000..875ca7b
--- /dev/null
@@ -0,0 +1,27 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-1
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "1000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-exclusive1/values.yaml b/resources/test_charts/cpu-pooling-exclusive1/values.yaml
new file mode 100644 (file)
index 0000000..8c53621
--- /dev/null
@@ -0,0 +1,2 @@
+pool_req: "1"
+nodename: caas_worker1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive2/Chart.yaml b/resources/test_charts/cpu-pooling-exclusive2/Chart.yaml
new file mode 100644 (file)
index 0000000..1c5e710
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-exclusive2
+version: 2.0.0
+description: pods for cpu-pooling-exclusive2 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive2/templates/cpupooling-deployment2.yaml b/resources/test_charts/cpu-pooling-exclusive2/templates/cpupooling-deployment2.yaml
new file mode 100644 (file)
index 0000000..c45df51
--- /dev/null
@@ -0,0 +1,40 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-2
+  namespace: default
+spec:
+  replicas: {{ .Values.replicas }}
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-exclusive2/values.yaml b/resources/test_charts/cpu-pooling-exclusive2/values.yaml
new file mode 100644 (file)
index 0000000..613848e
--- /dev/null
@@ -0,0 +1,4 @@
+pool_req: "1"
+proc_req: 1
+replicas: 1
+nodename: caas_worker1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive3/Chart.yaml b/resources/test_charts/cpu-pooling-exclusive3/Chart.yaml
new file mode 100644 (file)
index 0000000..4478a27
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-exclusive3
+version: 2.0.0
+description: pods for cpu-pooling-exclusive3 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-exclusive3/templates/cpupooling-deployment3.yaml b/resources/test_charts/cpu-pooling-exclusive3/templates/cpupooling-deployment3.yaml
new file mode 100644 (file)
index 0000000..fc9a1ab
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-3
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc1_req }}
+              },
+              {
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc2_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.pool_req }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-exclusive3/values.yaml b/resources/test_charts/cpu-pooling-exclusive3/values.yaml
new file mode 100644 (file)
index 0000000..a557adb
--- /dev/null
@@ -0,0 +1,4 @@
+pool_req: "2"
+proc1_req: 1
+proc2_req: 1
+nodename: caas_worker1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-mix1/Chart.yaml b/resources/test_charts/cpu-pooling-mix1/Chart.yaml
new file mode 100644 (file)
index 0000000..e30b855
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-mix1
+version: 2.0.0
+description: pods for cpu-pooling-mix1 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-mix1/templates/cpupooling-deployment5.yaml b/resources/test_charts/cpu-pooling-mix1/templates/cpupooling-deployment5.yaml
new file mode 100644 (file)
index 0000000..462cac0
--- /dev/null
@@ -0,0 +1,48 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-5
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling1",
+            "processes":
+              [{
+                "process": "/usr/bin/dumb-init",
+                "args": ["-c", "sleep", "1000"],
+                "pool": "exclusive_caas",
+                "cpus": {{ .Values.proc_req }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling1
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.exclusive_pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.exclusive_pool_req }}
+      - name: cpu-pooling2
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        command: ["/bin/sh", "-c", "--"]
+        args: ["while true; do echo \"Test\"; sleep 1; done;"]
+        resources:
+          requests:
+            nokia.k8s.io/shared_caas: {{ .Values.shared_pool_req }}
+          limits:
+            nokia.k8s.io/shared_caas: {{ .Values.shared_pool_req }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-mix1/values.yaml b/resources/test_charts/cpu-pooling-mix1/values.yaml
new file mode 100644 (file)
index 0000000..48f730d
--- /dev/null
@@ -0,0 +1,4 @@
+shared_pool_req: "500"
+exclusive_pool_req: "1"
+proc_req: 1
+nodename: caas_worker1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-mix2/Chart.yaml b/resources/test_charts/cpu-pooling-mix2/Chart.yaml
new file mode 100644 (file)
index 0000000..56b2d99
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-mix2
+version: 2.0.1
+description: pods for cpu-pooling-mix2 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-mix2/templates/cpupooling-deployment6.yaml b/resources/test_charts/cpu-pooling-mix2/templates/cpupooling-deployment6.yaml
new file mode 100644 (file)
index 0000000..65ccba0
--- /dev/null
@@ -0,0 +1,45 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-6
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+      annotations:
+        nokia.k8s.io/cpus: |
+          [{
+            "container": "cpu-pooling",
+            "processes":
+              [{
+                  "process": "/usr/bin/dumb-init",
+                  "args": ["-c", "sleep", "1000"],
+                  "pool": "exclusive_caas",
+                  "cpus": {{ .Values.proc_req }}
+                },
+                {
+                  "process": "/usr/bin/dumb-init",
+                  "args": ["-c", "sleep", "2000"],
+                  "pool": "shared_caas",
+                  "cpus": {{ .Values.shared_pool_ann }}
+              }]
+          }]
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        resources:
+          requests:
+            nokia.k8s.io/exclusive_caas: {{ .Values.exclusive_pool_req }}
+            nokia.k8s.io/shared_caas: {{ .Values.shared_pool_req }}
+          limits:
+            nokia.k8s.io/exclusive_caas: {{ .Values.exclusive_pool_req }}
+            nokia.k8s.io/shared_caas: {{ .Values.shared_pool_req }}
diff --git a/resources/test_charts/cpu-pooling-mix2/values.yaml b/resources/test_charts/cpu-pooling-mix2/values.yaml
new file mode 100644 (file)
index 0000000..9c9152b
--- /dev/null
@@ -0,0 +1,4 @@
+shared_pool_req: "500"
+shared_pool_ann: 500
+exclusive_pool_req: "1"
+proc_req: 1
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-shared1/Chart.yaml b/resources/test_charts/cpu-pooling-shared1/Chart.yaml
new file mode 100644 (file)
index 0000000..af23824
--- /dev/null
@@ -0,0 +1,3 @@
+name: cpu-pooling-shared1
+version: 2.0.1
+description: pods for cpu-pooling-shared1 tests
\ No newline at end of file
diff --git a/resources/test_charts/cpu-pooling-shared1/templates/cpupooling-deployment4.yaml b/resources/test_charts/cpu-pooling-shared1/templates/cpupooling-deployment4.yaml
new file mode 100644 (file)
index 0000000..b5d5ab0
--- /dev/null
@@ -0,0 +1,27 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: cpu-pooling-4
+  namespace: default
+spec:
+  selector:
+    matchLabels:
+      app: cpu-pooling
+  template:
+    metadata:
+      labels:
+        app: cpu-pooling
+    spec:
+      containers:
+      - name: cpu-pooling
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/bin/sh", "-c", "--"]
+        args: ["yes > /dev/null"]
+        resources:
+          requests:
+            nokia.k8s.io/shared_caas: {{ .Values.pool_req }}
+          limits:
+            nokia.k8s.io/shared_caas: {{ .Values.pool_req }}
+      nodeSelector:
+        nodename: {{ .Values.nodename }}
diff --git a/resources/test_charts/cpu-pooling-shared1/values.yaml b/resources/test_charts/cpu-pooling-shared1/values.yaml
new file mode 100644 (file)
index 0000000..c3bf4d8
--- /dev/null
@@ -0,0 +1,2 @@
+pool_req: "500"
+nodename: caas_worker1
\ No newline at end of file
diff --git a/resources/test_charts/custom-metrics/Chart.yaml b/resources/test_charts/custom-metrics/Chart.yaml
new file mode 100644 (file)
index 0000000..909e57e
--- /dev/null
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: custom-metrics
+version: 0.4.0
diff --git a/resources/test_charts/custom-metrics/templates/podinfo-dep.yaml b/resources/test_charts/custom-metrics/templates/podinfo-dep.yaml
new file mode 100644 (file)
index 0000000..48a5966
--- /dev/null
@@ -0,0 +1,48 @@
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: podinfo
+  namespace: kube-system
+spec:
+  selector:
+    matchLabels:
+      app: podinfo
+      k8s-app: podinfo
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: podinfo
+        k8s-app: podinfo
+      annotations:
+        prometheus.io/scrape: 'true'
+    spec:
+      containers:
+      - name: podinfod
+        image: {{ .Values.registry_url }}:5555/caas/podinfo:latest
+        imagePullPolicy: Always
+        ports:
+        - containerPort: 9898
+          protocol: TCP
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: 9898
+          initialDelaySeconds: 1
+          periodSeconds: 2
+          failureThreshold: 1
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 9898
+          initialDelaySeconds: 1
+          periodSeconds: 3
+          failureThreshold: 2
+        resources:
+          requests:
+            memory: "32Mi"
+            cpu: "1m"
+          limits:
+            memory: "256Mi"
+            cpu: "100m"
diff --git a/resources/test_charts/custom-metrics/templates/podinfo-hpa-custom.yaml b/resources/test_charts/custom-metrics/templates/podinfo-hpa-custom.yaml
new file mode 100644 (file)
index 0000000..086411b
--- /dev/null
@@ -0,0 +1,18 @@
+---
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: podinfo
+  namespace: kube-system
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: podinfo
+  minReplicas: 2
+  maxReplicas: 10
+  metrics:
+  - type: Pods
+    pods:
+      metricName: http_requests
+      targetAverageValue: 5
diff --git a/resources/test_charts/custom-metrics/templates/podinfo-svc.yaml b/resources/test_charts/custom-metrics/templates/podinfo-svc.yaml
new file mode 100644 (file)
index 0000000..0592015
--- /dev/null
@@ -0,0 +1,17 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: kube-system
+  labels:
+    app: podinfo
+    k8s-app: podinfo
+spec:
+  ports:
+    - port: 9898
+      targetPort: 9898
+      protocol: TCP
+  selector:
+    k8s-app: podinfo
+    app: podinfo
diff --git a/resources/test_charts/danmnet-pods1/Chart.yaml b/resources/test_charts/danmnet-pods1/Chart.yaml
new file mode 100644 (file)
index 0000000..5609195
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods1
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-1.yaml b/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-1.yaml
new file mode 100644 (file)
index 0000000..7a7f3c2
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods1-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.11/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-2.yaml b/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-2.yaml
new file mode 100644 (file)
index 0000000..f059c97
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods1-2
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.19/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-3.yaml b/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-3.yaml
new file mode 100644 (file)
index 0000000..2669933
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods1-3
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.20/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-4.yaml b/resources/test_charts/danmnet-pods1/templates/danmnet-pods1-4.yaml
new file mode 100644 (file)
index 0000000..a00eb22
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods1-4
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.255.254/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods10/Chart.yaml b/resources/test_charts/danmnet-pods10/Chart.yaml
new file mode 100644 (file)
index 0000000..952aa4c
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods10
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods10/templates/danmnet-pods10.yaml b/resources/test_charts/danmnet-pods10/templates/danmnet-pods10.yaml
new file mode 100644 (file)
index 0000000..2ccef00
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods10
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"none"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods11/Chart.yaml b/resources/test_charts/danmnet-pods11/Chart.yaml
new file mode 100644 (file)
index 0000000..2a66a33
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods11
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods11/templates/danmnet-pods11.yaml b/resources/test_charts/danmnet-pods11/templates/danmnet-pods11.yaml
new file mode 100644 (file)
index 0000000..315734d
--- /dev/null
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods11
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
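+        # presumed negative case: "dummy" does not match any existing network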
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod5", "ip":"10.10.0.250/24"
+            },
+            {
+              "clusterNetwork":"dummy", "ip":"dynamic"
+            },
+            {
+              "clusterNetwork":"cnet-pod6", "ip":"none"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods12/Chart.yaml b/resources/test_charts/danmnet-pods12/Chart.yaml
new file mode 100644 (file)
index 0000000..a38f60e
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods12
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods12/templates/danmnet-pods12.yaml b/resources/test_charts/danmnet-pods12/templates/danmnet-pods12.yaml
new file mode 100644 (file)
index 0000000..2f6618d
--- /dev/null
@@ -0,0 +1,36 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods12
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod5", "ip":"10.10.0.250/24"
+            },
+            {
+              "clusterNetwork":"cnet-pod7", "ip":"none"
+            },
+            {
+              "clusterNetwork":"cnet-pod6", "ip":"dynamic"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods13/Chart.yaml b/resources/test_charts/danmnet-pods13/Chart.yaml
new file mode 100644 (file)
index 0000000..47a1245
--- /dev/null
@@ -0,0 +1,3 @@
+description: pods for danmnet tests
+name: danmnet-pods13
+version: 2.0.0
diff --git a/resources/test_charts/danmnet-pods13/templates/danmnet-pods13.yaml b/resources/test_charts/danmnet-pods13/templates/danmnet-pods13.yaml
new file mode 100644 (file)
index 0000000..94099f8
--- /dev/null
@@ -0,0 +1,37 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods13
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
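+        # presumed negative case: "dummy" does not match any existing network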
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod6", "ip":"10.10.0.250/24"
+            },
+            {
+              "clusterNetwork":"dummy", "ip":"dynamic"
+            },
+            {
+              "clusterNetwork":"cnet-pod4", "ip":"dynamic"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods14/Chart.yaml b/resources/test_charts/danmnet-pods14/Chart.yaml
new file mode 100644 (file)
index 0000000..7c122da
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods14
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods14/templates/danmnet-pods14.yaml b/resources/test_charts/danmnet-pods14/templates/danmnet-pods14.yaml
new file mode 100644 (file)
index 0000000..419b405
--- /dev/null
@@ -0,0 +1,36 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods14
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod4", "ip":"dynamic"
+            },
+            {
+              "clusterNetwork":"cnet-pod7", "ip":"10.10.0.254/24"
+            },
+            {
+              "clusterNetwork":"cnet-pod6", "ip":"10.20.0.50/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods2/Chart.yaml b/resources/test_charts/danmnet-pods2/Chart.yaml
new file mode 100644 (file)
index 0000000..0850e2a
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods2
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods2/templates/danmnet-pods2.yaml b/resources/test_charts/danmnet-pods2/templates/danmnet-pods2.yaml
new file mode 100644 (file)
index 0000000..7499a6e
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods2
+  namespace: default
+spec:
+  replicas: 10
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods3/Chart.yaml b/resources/test_charts/danmnet-pods3/Chart.yaml
new file mode 100644 (file)
index 0000000..3e4aa47
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods3
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-1.yaml b/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-1.yaml
new file mode 100644 (file)
index 0000000..d5c9468
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods3-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.11/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-2.yaml b/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-2.yaml
new file mode 100644 (file)
index 0000000..0f6078a
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods3-2
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.19/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master2
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-3.yaml b/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-3.yaml
new file mode 100644 (file)
index 0000000..b2b0d64
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods3-3
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.1.20/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master3
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-4.yaml b/resources/test_charts/danmnet-pods3/templates/danmnet-pods3-4.yaml
new file mode 100644 (file)
index 0000000..088a116
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods3-4
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.5.255.254/16"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods4/Chart.yaml b/resources/test_charts/danmnet-pods4/Chart.yaml
new file mode 100644 (file)
index 0000000..af85338
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods4
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods4/templates/danmnet-pods4.yaml b/resources/test_charts/danmnet-pods4/templates/danmnet-pods4.yaml
new file mode 100644 (file)
index 0000000..25badb3
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods4
+  namespace: kube-system
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "network":"test-net2", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods5/Chart.yaml b/resources/test_charts/danmnet-pods5/Chart.yaml
new file mode 100644 (file)
index 0000000..19adc83
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods5
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods5/templates/danmnet-pods5.yaml b/resources/test_charts/danmnet-pods5/templates/danmnet-pods5.yaml
new file mode 100644 (file)
index 0000000..06cb803
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods5
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod1", "ip":"10.6.0.1/16"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods6/Chart.yaml b/resources/test_charts/danmnet-pods6/Chart.yaml
new file mode 100644 (file)
index 0000000..8519c86
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods6
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods6/templates/danmnet-pods6-1.yaml b/resources/test_charts/danmnet-pods6/templates/danmnet-pods6-1.yaml
new file mode 100644 (file)
index 0000000..d0d580a
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods6-1
+  namespace: default
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod2", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods6/templates/danmnet-pods6-2.yaml b/resources/test_charts/danmnet-pods6/templates/danmnet-pods6-2.yaml
new file mode 100644 (file)
index 0000000..217af07
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods6-2
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod2", "ip":"10.0.0.1/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods7/Chart.yaml b/resources/test_charts/danmnet-pods7/Chart.yaml
new file mode 100644 (file)
index 0000000..eecde18
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods7
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods7/templates/danmnet-pods7_1.yaml b/resources/test_charts/danmnet-pods7/templates/danmnet-pods7_1.yaml
new file mode 100644 (file)
index 0000000..084502e
--- /dev/null
@@ -0,0 +1,31 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods7-1
+  namespace: default
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod4", "ip":"dynamic"
+            },
+            {
+              "clusterNetwork":"cnet-pod3", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-pods8/Chart.yaml b/resources/test_charts/danmnet-pods8/Chart.yaml
new file mode 100644 (file)
index 0000000..4e1e101
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods8
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1.yaml b/resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1.yaml
new file mode 100644 (file)
index 0000000..52c0b3b
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods8-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      name: danmnet-pods8-1
+      app: alpine
+  template:
+    metadata:
+      labels:
+        name: danmnet-pods8-1
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod4", "ip":"10.244.100.100/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "sh", "-c", "--"]
+        args: ["echo -e 'HTTP/1.0 200 OK \n\nOK'>/tmp/temp; nc -l -p 4242 < /tmp/temp; sleep 6000"]
diff --git a/resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1_service.yaml b/resources/test_charts/danmnet-pods8/templates/danmnet-pods8_1_service.yaml
new file mode 100644 (file)
index 0000000..9d57b62
--- /dev/null
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: danmnet-pods8-1
+  name: danmnet-pods8-1
+  namespace: default
+spec:
+  ports:
+  - name: danmnet-pods8-1
+    port: 4242
+    protocol: TCP
+  selector:
+    name: danmnet-pods8-1
diff --git a/resources/test_charts/danmnet-pods9/Chart.yaml b/resources/test_charts/danmnet-pods9/Chart.yaml
new file mode 100644 (file)
index 0000000..daaa744
--- /dev/null
@@ -0,0 +1,3 @@
+name: danmnet-pods9
+version: 2.0.0
+description: pods for danmnet tests
diff --git a/resources/test_charts/danmnet-pods9/templates/danmnet-pods9_1.yaml b/resources/test_charts/danmnet-pods9/templates/danmnet-pods9_1.yaml
new file mode 100644 (file)
index 0000000..86779c9
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: danmnet-pods9-1
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "clusterNetwork":"cnet-pod4", "ip":"10.0.0.1/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/danmnet-test-2/Chart.yaml b/resources/test_charts/danmnet-test-2/Chart.yaml
new file mode 100644 (file)
index 0000000..2857c09
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for danm network tests
+name: danmnet-test-2
+version: 1.1.1
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net1.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net1.yaml
new file mode 100644 (file)
index 0000000..9509302
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net1
+  namespace: default
+spec:
+  NetworkID: test-net1
+  Options:
+    container_prefix: eth0
+    host_device: {{ .Values.infra_int_if }} 
+    rt_tables: 201
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net11.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net11.yaml
new file mode 100644 (file)
index 0000000..b21c095
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net11
+  namespace: default
+spec:
+  NetworkID: test-net11
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 18
+    vxlan: 54
+    vlan: 999
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net13.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net13.yaml
new file mode 100644 (file)
index 0000000..b158d56
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net13
+  namespace: default
+spec:
+  NetworkID: test-net13
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 20
+    vxlan: 56
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net15.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net15.yaml
new file mode 100644 (file)
index 0000000..a5d028d
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net15
+  namespace: default
+spec:
+  NetworkID: test-net15
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/23
+    allocation_pool:
+      start: 10.0.0.0
+      end: 10.0.1.255
+    container_prefix: eth0
+    rt_tables: 22
+    vlan: 1
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net16.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net16.yaml
new file mode 100644 (file)
index 0000000..590b333
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net16
+  namespace: default
+spec:
+  NetworkID: test-net16
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 23
+    vlan: 4094
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net2.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net2.yaml
new file mode 100644 (file)
index 0000000..10f465c
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: default
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.0.0.0/32: 10.0.0.50
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net20.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net20.yaml
new file mode 100644 (file)
index 0000000..4f256e1
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net20
+  namespace: default
+spec:
+  NetworkID: test-net20
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 27
+    vxlan: 1
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net21.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net21.yaml
new file mode 100644 (file)
index 0000000..7935c61
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net21
+  namespace: default
+spec:
+  NetworkID: test-net21
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 28
+    vxlan: 16777214
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net23.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net23.yaml
new file mode 100644 (file)
index 0000000..2c0ea8c
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net23
+  namespace: default
+spec:
+  NetworkID: test-net23
+  NetworkType: inv#lid
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.255
+    container_prefix: eth0
+    rt_tables: 30
+    vxlan: 57
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net24.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net24.yaml
new file mode 100644 (file)
index 0000000..4efb0cb
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net24
+  namespace: default
+spec:
+  NetworkID: test-net24
+  NetworkType: flannel
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 31
+    vxlan: 58
+    vlan: 57
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net25.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net25.yaml
new file mode 100644 (file)
index 0000000..8c1c323
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net25
+  namespace: default
+spec:
+  NetworkID: test-net25
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth2
+    rt_tables: 10
+    vlan: 58
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net26.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net26.yaml
new file mode 100644 (file)
index 0000000..8484967
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net26
+  namespace: default
+spec:
+  NetworkID: test-net26
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 60
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net28.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net28.yaml
new file mode 100644 (file)
index 0000000..b962458
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net28
+  namespace: default
+spec:
+  NetworkID: test-net28
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 33
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net30.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net30.yaml
new file mode 100644 (file)
index 0000000..4457804
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net30
+  namespace: default
+spec:
+  NetworkID: test-net30
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.10.0.0/24 
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.10.0.0/32: 10.10.0.40
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net5.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net5.yaml
new file mode 100644 (file)
index 0000000..dddb4f8
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net5
+  namespace: default
+spec:
+  NetworkID: test-net5
+  Options:
+    container_prefix: eth0
+    host_device: inval#d 
+    rt_tables: 14
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net7.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net7.yaml
new file mode 100644 (file)
index 0000000..7fd7aef
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net7
+  namespace: default
+spec:
+  NetworkID: test-net7
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 53
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net8.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net8.yaml
new file mode 100644 (file)
index 0000000..b2c8702
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net8
+  namespace: default
+spec:
+  NetworkID: test-net8
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.20.0.0/24 
+    allocation_pool:
+      start: 10.20.0.1
+      end: 10.20.0.30
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 54
diff --git a/resources/test_charts/danmnet-test-2/templates/d_test-net9.yaml b/resources/test_charts/danmnet-test-2/templates/d_test-net9.yaml
new file mode 100644 (file)
index 0000000..e9fd0d2
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net9
+  namespace: default
+spec:
+  NetworkID: test-net9
+  Options:
+    container_prefix: eth0
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.1.1.2
+      end: 10.1.1.50
+    rt_tables: 155
+    vlan: 55
diff --git a/resources/test_charts/danmnet-test-2/templates/ks_test-net2.yaml b/resources/test_charts/danmnet-test-2/templates/ks_test-net2.yaml
new file mode 100644 (file)
index 0000000..8f70e1b
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: kube-system
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.1.1.0/24 
+    allocation_pool:
+      start: 10.1.1.10
+      end: 10.1.1.15
+    container_prefix: eth0
+    rt_tables: 35
+    routes:
+      10.1.1.0/32: 10.1.1.1
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-2/templates/ks_test-net27.yaml b/resources/test_charts/danmnet-test-2/templates/ks_test-net27.yaml
new file mode 100644 (file)
index 0000000..16a6ac1
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net27
+  namespace: kube-system
+spec:
+  NetworkID: test-net27
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 61
diff --git a/resources/test_charts/danmnet-test-2/templates/ks_test-net29.yaml b/resources/test_charts/danmnet-test-2/templates/ks_test-net29.yaml
new file mode 100644 (file)
index 0000000..cb6d21b
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net29
+  namespace: kube-system
+spec:
+  NetworkID: test-net29
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 34
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-3/Chart.yaml b/resources/test_charts/danmnet-test-3/Chart.yaml
new file mode 100644 (file)
index 0000000..216d828
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for danm network tests
+name: danmnet-test-3
+version: 1.1.1
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net1.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net1.yaml
new file mode 100644 (file)
index 0000000..5b2f783
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net1
+  namespace: default
+spec:
+  NetworkID: test-net1
+  Options:
+    container_prefix: eth0
+    host_device: ens3 
+    rt_tables: 201
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net11.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net11.yaml
new file mode 100644 (file)
index 0000000..a755f28
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net11
+  namespace: default
+spec:
+  NetworkID: test-net11
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 18
+    vxlan: 54
+    vlan: 999
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net13.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net13.yaml
new file mode 100644 (file)
index 0000000..d528042
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net13
+  namespace: default
+spec:
+  NetworkID: test-net13
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 20
+    vxlan: 56
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net15.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net15.yaml
new file mode 100644 (file)
index 0000000..fcc21a6
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net15
+  namespace: default
+spec:
+  NetworkID: test-net15
+  Options:
+    host_device: ens4
+    cidr: 10.0.0.0/23
+    allocation_pool:
+      start: 10.0.0.0
+      end: 10.0.1.255
+    container_prefix: eth0
+    rt_tables: 22
+    vlan: 1
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net16.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net16.yaml
new file mode 100644 (file)
index 0000000..b389a2c
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net16
+  namespace: default
+spec:
+  NetworkID: test-net16
+  Options:
+    host_device: ens4
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 23
+    vlan: 4094
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net2.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net2.yaml
new file mode 100644 (file)
index 0000000..268c0a6
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: default
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.0.0.0/32: 10.0.0.50
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net20.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net20.yaml
new file mode 100644 (file)
index 0000000..e346e22
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net20
+  namespace: default
+spec:
+  NetworkID: test-net20
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 27
+    vxlan: 1
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net21.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net21.yaml
new file mode 100644 (file)
index 0000000..96dc14d
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net21
+  namespace: default
+spec:
+  NetworkID: test-net21
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 28
+    vxlan: 16777214
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net23.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net23.yaml
new file mode 100644 (file)
index 0000000..3811740
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net23
+  namespace: default
+spec:
+  NetworkID: test-net23
+  NetworkType: inv#lid
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.255
+    container_prefix: eth0
+    rt_tables: 30
+    vxlan: 57
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net24.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net24.yaml
new file mode 100644 (file)
index 0000000..ea0df47
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net24
+  namespace: default
+spec:
+  NetworkID: test-net24
+  NetworkType: flannel
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 31
+    vxlan: 58
+    vlan: 57
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net25.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net25.yaml
new file mode 100644 (file)
index 0000000..b0d9dc2
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net25
+  namespace: default
+spec:
+  NetworkID: test-net25
+  Options:
+    host_device: ens4
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth2
+    rt_tables: 10
+    vlan: 58
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net26.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net26.yaml
new file mode 100644 (file)
index 0000000..2e32fdf
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net26
+  namespace: default
+spec:
+  NetworkID: test-net26
+  Options:
+    host_device: ens3
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 60
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net28.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net28.yaml
new file mode 100644 (file)
index 0000000..24dac6f
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net28
+  namespace: default
+spec:
+  NetworkID: test-net28
+  Options:
+    host_device: ens3
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 33
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net30.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net30.yaml
new file mode 100644 (file)
index 0000000..c2d1873
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net30
+  namespace: default
+spec:
+  NetworkID: test-net30
+  Options:
+    host_device: ens3 
+    cidr: 10.10.0.0/24 
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.10.0.0/32: 10.10.0.40
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net5.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net5.yaml
new file mode 100644 (file)
index 0000000..dddb4f8
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net5
+  namespace: default
+spec:
+  NetworkID: test-net5
+  Options:
+    container_prefix: eth0
+    host_device: inval#d 
+    rt_tables: 14
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net7.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net7.yaml
new file mode 100644 (file)
index 0000000..207ec17
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net7
+  namespace: default
+spec:
+  NetworkID: test-net7
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 53
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net8.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net8.yaml
new file mode 100644 (file)
index 0000000..2faa410
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net8
+  namespace: default
+spec:
+  NetworkID: test-net8
+  Options:
+    host_device: ens3 
+    cidr: 10.20.0.0/24 
+    allocation_pool:
+      start: 10.20.0.1
+      end: 10.20.0.30
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 54
diff --git a/resources/test_charts/danmnet-test-3/templates/d_test-net9.yaml b/resources/test_charts/danmnet-test-3/templates/d_test-net9.yaml
new file mode 100644 (file)
index 0000000..623aea3
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net9
+  namespace: default
+spec:
+  NetworkID: test-net9
+  Options:
+    container_prefix: eth0
+    host_device: ens3
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.1.1.2
+      end: 10.1.1.50
+    rt_tables: 155
+    vlan: 55
diff --git a/resources/test_charts/danmnet-test-3/templates/ks_test-net2.yaml b/resources/test_charts/danmnet-test-3/templates/ks_test-net2.yaml
new file mode 100644 (file)
index 0000000..685b3e7
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: kube-system
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: ens3 
+    cidr: 10.1.1.0/24 
+    allocation_pool:
+      start: 10.1.1.10
+      end: 10.1.1.15
+    container_prefix: eth0
+    rt_tables: 35
+    routes:
+      10.1.1.0/32: 10.1.1.1
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-3/templates/ks_test-net27.yaml b/resources/test_charts/danmnet-test-3/templates/ks_test-net27.yaml
new file mode 100644 (file)
index 0000000..b4a0cf8
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net27
+  namespace: kube-system
+spec:
+  NetworkID: test-net27
+  Options:
+    host_device: ens3
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 61
diff --git a/resources/test_charts/danmnet-test-3/templates/ks_test-net29.yaml b/resources/test_charts/danmnet-test-3/templates/ks_test-net29.yaml
new file mode 100644 (file)
index 0000000..0ba3d0f
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net29
+  namespace: kube-system
+spec:
+  NetworkID: test-net29
+  Options:
+    host_device: ens3
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 34
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test-error/Chart.yaml b/resources/test_charts/danmnet-test-error/Chart.yaml
new file mode 100644 (file)
index 0000000..8f9651f
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for danm network tests
+name: danmnet-test-error
+version: 1.1.1
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net10.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net10.yaml
new file mode 100644 (file)
index 0000000..9e20a3d
--- /dev/null
@@ -0,0 +1,15 @@
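+# erroneous DanmNet: allocation_pool is defined without a cidr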
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net10
+  namespace: default
+spec:
+  NetworkID: test-net10
+  Options:
+    host_device: ens3
+    allocation_pool:
+      start: 10.1.1.2
+      end: 10.1.1.50
+    rt_tables: 17
+    vlan: 56
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net11.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net11.yaml
new file mode 100644 (file)
index 0000000..a755f28
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net11
+  namespace: default
+spec:
+  NetworkID: test-net11
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 18
+    vxlan: 54
+    vlan: 999
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net12.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net12.yaml
new file mode 100644 (file)
index 0000000..5819869
--- /dev/null
@@ -0,0 +1,17 @@
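+# erroneous DanmNet: malformed cidr value ("invalid/24")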
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net12
+  namespace: default
+spec:
+  NetworkID: test-net12
+  Options:
+    host_device: ens3 
+    cidr: invalid/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 19
+    vxlan: 55
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net14.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net14.yaml
new file mode 100644 (file)
index 0000000..8c9f2bd
--- /dev/null
@@ -0,0 +1,17 @@
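+# erroneous DanmNet: vlan 0 is outside the valid VLAN ID range (1-4094)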
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net14
+  namespace: default
+spec:
+  NetworkID: test-net14
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 21
+    vlan: 0
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net17.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net17.yaml
new file mode 100644 (file)
index 0000000..24adbc0
--- /dev/null
@@ -0,0 +1,17 @@
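+# erroneous DanmNet: vlan 4095 is reserved and outside the valid VLAN ID range (1-4094)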
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net17
+  namespace: default
+spec:
+  NetworkID: test-net17
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 24
+    vlan: 4095
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net18.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net18.yaml
new file mode 100644 (file)
index 0000000..d8eb5b2
--- /dev/null
@@ -0,0 +1,17 @@
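+# erroneous DanmNet: vlan 4096 is outside the valid VLAN ID range (1-4094)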
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net18
+  namespace: default
+spec:
+  NetworkID: test-net18
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 25
+    vlan: 4096
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net19.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net19.yaml
new file mode 100644 (file)
index 0000000..f35cf31
--- /dev/null
@@ -0,0 +1,17 @@
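+# erroneous DanmNet: vxlan 0 is below the accepted VNI range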
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net19
+  namespace: default
+spec:
+  NetworkID: test-net19
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 26
+    vxlan: 0
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net22.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net22.yaml
new file mode 100644 (file)
index 0000000..b78ee60
--- /dev/null
@@ -0,0 +1,17 @@
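+# erroneous DanmNet: vxlan 16777215 exceeds the accepted VNI range (the valid charts use at most 16777214)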
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net22
+  namespace: default
+spec:
+  NetworkID: test-net22
+  Options:
+    host_device: ens3 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 29
+    vxlan: 16777215
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net3.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net3.yaml
new file mode 100644 (file)
index 0000000..4b9c26b
--- /dev/null
@@ -0,0 +1,17 @@
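+# erroneous DanmNet: IPv6 addresses given in the IPv4 cidr and allocation_pool fields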
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net3
+  namespace: default
+spec:
+  NetworkID: test-net3
+  Options:
+    host_device: ens3 
+    cidr: 0:0:0:0:0:ffff:0f00:0000/120
+    allocation_pool:
+      start: 0:0:0:0:0:ffff:0f00:0001
+      end: 0:0:0:0:0:ffff:0f00:0014
+    container_prefix: eth0
+    rt_tables: 12
+    vxlan: 51
diff --git a/resources/test_charts/danmnet-test-error/templates/d_test-net9.yaml b/resources/test_charts/danmnet-test-error/templates/d_test-net9.yaml
new file mode 100644 (file)
index 0000000..623aea3
--- /dev/null
@@ -0,0 +1,17 @@
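+# erroneous DanmNet: allocation_pool (10.1.1.2-10.1.1.50) lies outside the 10.10.0.0/24 cidr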
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net9
+  namespace: default
+spec:
+  NetworkID: test-net9
+  Options:
+    container_prefix: eth0
+    host_device: ens3
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.1.1.2
+      end: 10.1.1.50
+    rt_tables: 155
+    vlan: 55
diff --git a/resources/test_charts/danmnet-test/Chart.yaml b/resources/test_charts/danmnet-test/Chart.yaml
new file mode 100644 (file)
index 0000000..58def20
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for danm network tests
+name: danmnet-test
+version: 1.1.1
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net1.yaml b/resources/test_charts/danmnet-test/templates/d_test-net1.yaml
new file mode 100644 (file)
index 0000000..9509302
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net1
+  namespace: default
+spec:
+  NetworkID: test-net1
+  Options:
+    container_prefix: eth0
+    host_device: {{ .Values.infra_int_if }} 
+    rt_tables: 201
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net13.yaml b/resources/test_charts/danmnet-test/templates/d_test-net13.yaml
new file mode 100644 (file)
index 0000000..b158d56
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net13
+  namespace: default
+spec:
+  NetworkID: test-net13
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 20
+    vxlan: 56
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net15.yaml b/resources/test_charts/danmnet-test/templates/d_test-net15.yaml
new file mode 100644 (file)
index 0000000..a5d028d
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net15
+  namespace: default
+spec:
+  NetworkID: test-net15
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/23
+    allocation_pool:
+      start: 10.0.0.0
+      end: 10.0.1.255
+    container_prefix: eth0
+    rt_tables: 22
+    vlan: 1
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net16.yaml b/resources/test_charts/danmnet-test/templates/d_test-net16.yaml
new file mode 100644 (file)
index 0000000..590b333
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net16
+  namespace: default
+spec:
+  NetworkID: test-net16
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 23
+    vlan: 4094
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net2.yaml b/resources/test_charts/danmnet-test/templates/d_test-net2.yaml
new file mode 100644 (file)
index 0000000..10f465c
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: default
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.0.0.0/32: 10.0.0.50
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net20.yaml b/resources/test_charts/danmnet-test/templates/d_test-net20.yaml
new file mode 100644 (file)
index 0000000..4f256e1
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net20
+  namespace: default
+spec:
+  NetworkID: test-net20
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 27
+    vxlan: 1
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net21.yaml b/resources/test_charts/danmnet-test/templates/d_test-net21.yaml
new file mode 100644 (file)
index 0000000..7935c61
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net21
+  namespace: default
+spec:
+  NetworkID: test-net21
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 28
+    vxlan: 16777214
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net23.yaml b/resources/test_charts/danmnet-test/templates/d_test-net23.yaml
new file mode 100644 (file)
index 0000000..2c0ea8c
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net23
+  namespace: default
+spec:
+  NetworkID: test-net23
+  NetworkType: inv#lid
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.255
+    container_prefix: eth0
+    rt_tables: 30
+    vxlan: 57
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net24.yaml b/resources/test_charts/danmnet-test/templates/d_test-net24.yaml
new file mode 100644 (file)
index 0000000..585773c
--- /dev/null
@@ -0,0 +1,9 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net24
+  namespace: default
+spec:
+  NetworkID: test-net24
+  NetworkType: flannel
\ No newline at end of file
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net25.yaml b/resources/test_charts/danmnet-test/templates/d_test-net25.yaml
new file mode 100644 (file)
index 0000000..8c1c323
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net25
+  namespace: default
+spec:
+  NetworkID: test-net25
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth2
+    rt_tables: 10
+    vlan: 58
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net26.yaml b/resources/test_charts/danmnet-test/templates/d_test-net26.yaml
new file mode 100644 (file)
index 0000000..8484967
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net26
+  namespace: default
+spec:
+  NetworkID: test-net26
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 60
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net28.yaml b/resources/test_charts/danmnet-test/templates/d_test-net28.yaml
new file mode 100644 (file)
index 0000000..b962458
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net28
+  namespace: default
+spec:
+  NetworkID: test-net28
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 33
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net30.yaml b/resources/test_charts/danmnet-test/templates/d_test-net30.yaml
new file mode 100644 (file)
index 0000000..4457804
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net30
+  namespace: default
+spec:
+  NetworkID: test-net30
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.10.0.0/24 
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    container_prefix: eth0
+    rt_tables: 10
+    routes:
+      10.10.0.0/32: 10.10.0.40
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net4.yaml b/resources/test_charts/danmnet-test/templates/d_test-net4.yaml
new file mode 100644 (file)
index 0000000..29b6454
--- /dev/null
@@ -0,0 +1,9 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net4
+  namespace: default
+spec:
+  NetworkID: test-net4
+  Options:
+    rt_tables: 13
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net5.yaml b/resources/test_charts/danmnet-test/templates/d_test-net5.yaml
new file mode 100644 (file)
index 0000000..dddb4f8
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net5
+  namespace: default
+spec:
+  NetworkID: test-net5
+  Options:
+    container_prefix: eth0
+    host_device: inval#d 
+    rt_tables: 14
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net6.yaml b/resources/test_charts/danmnet-test/templates/d_test-net6.yaml
new file mode 100644 (file)
index 0000000..14768f8
--- /dev/null
@@ -0,0 +1,11 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net6
+  namespace: default
+spec:
+  NetworkID: test-net6
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    vxlan: 52
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net7.yaml b/resources/test_charts/danmnet-test/templates/d_test-net7.yaml
new file mode 100644 (file)
index 0000000..7fd7aef
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net7
+  namespace: default
+spec:
+  NetworkID: test-net7
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.0.0.0/24 
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 53
diff --git a/resources/test_charts/danmnet-test/templates/d_test-net8.yaml b/resources/test_charts/danmnet-test/templates/d_test-net8.yaml
new file mode 100644 (file)
index 0000000..b2c8702
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net8
+  namespace: default
+spec:
+  NetworkID: test-net8
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.20.0.0/24 
+    allocation_pool:
+      start: 10.20.0.1
+      end: 10.20.0.30
+    container_prefix: eth1
+    rt_tables: 15
+    vxlan: 54
diff --git a/resources/test_charts/danmnet-test/templates/ks_test-net2.yaml b/resources/test_charts/danmnet-test/templates/ks_test-net2.yaml
new file mode 100644 (file)
index 0000000..8f70e1b
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1 
+kind: DanmNet
+metadata:
+  name: test-net2
+  namespace: kube-system
+spec:
+  NetworkID: test-net2
+  Options:
+    host_device: {{ .Values.infra_int_if }} 
+    cidr: 10.1.1.0/24 
+    allocation_pool:
+      start: 10.1.1.10
+      end: 10.1.1.15
+    container_prefix: eth0
+    rt_tables: 35
+    routes:
+      10.1.1.0/32: 10.1.1.1
+    vxlan: 50
diff --git a/resources/test_charts/danmnet-test/templates/ks_test-net27.yaml b/resources/test_charts/danmnet-test/templates/ks_test-net27.yaml
new file mode 100644 (file)
index 0000000..16a6ac1
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net27
+  namespace: kube-system
+spec:
+  NetworkID: test-net27
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 10
+    vxlan: 61
diff --git a/resources/test_charts/danmnet-test/templates/ks_test-net29.yaml b/resources/test_charts/danmnet-test/templates/ks_test-net29.yaml
new file mode 100644 (file)
index 0000000..cb6d21b
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: DanmNet
+metadata:
+  name: test-net29
+  namespace: kube-system
+spec:
+  NetworkID: test-net29
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.20
+    container_prefix: eth0
+    rt_tables: 34
+    vxlan: 50
diff --git a/resources/test_charts/http-traffic-gen/Chart.yaml b/resources/test_charts/http-traffic-gen/Chart.yaml
new file mode 100644 (file)
index 0000000..524a6d9
--- /dev/null
@@ -0,0 +1,5 @@
+apiVersion: v1
+appVersion: "1.0"
+description: A Helm chart for Kubernetes
+name: http-traffic-gen
+version: 0.4.0
diff --git a/resources/test_charts/http-traffic-gen/templates/http-traffic-gen-dep.yaml b/resources/test_charts/http-traffic-gen/templates/http-traffic-gen-dep.yaml
new file mode 100644 (file)
index 0000000..6879538
--- /dev/null
@@ -0,0 +1,14 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: http-traffic-gen
+  namespace: default
+spec:
+  template:
+    metadata:
+      name:  http-traffic-gen
+    spec:
+      containers:
+      - name: http-traffic-gen
+        image: {{ .Values.registry_url }}:5555/caas/http-traffic-gen:latest
+      restartPolicy: Never
diff --git a/resources/test_charts/load-generator-for-apache/Chart.yaml b/resources/test_charts/load-generator-for-apache/Chart.yaml
new file mode 100644 (file)
index 0000000..f735ac9
--- /dev/null
@@ -0,0 +1,3 @@
+name: load-generator-for-apache
+version: 1.1.1 
+description: load generator for HPA test
diff --git a/resources/test_charts/load-generator-for-apache/templates/load-generator.yml b/resources/test_charts/load-generator-for-apache/templates/load-generator.yml
new file mode 100644 (file)
index 0000000..c121a52
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: load-generator-for-apache
+  namespace: default
+spec:
+  template:
+    metadata:
+      name:  load-generator-for-apache
+    spec:
+      containers:
+      - name: busybox
+        image: {{ .Values.registry_url }}:5555/caas/busybox:latest
+        command: ["/bin/sh", "-c", "--"]
+        args: ["START=`date +%s`; while [ $(( $(date +%s) - 90 )) -lt $START ]; do wget -q -O- http://10.254.142.142:80; done;"]
+      restartPolicy: Never
diff --git a/resources/test_charts/logger/Chart.yaml b/resources/test_charts/logger/Chart.yaml
new file mode 100644 (file)
index 0000000..5bca602
--- /dev/null
@@ -0,0 +1,3 @@
+name: logger
+version: 0.0.1 
+description: logger container
diff --git a/resources/test_charts/logger/templates/logger.yaml b/resources/test_charts/logger/templates/logger.yaml
new file mode 100644 (file)
index 0000000..1d3ffd5
--- /dev/null
@@ -0,0 +1,31 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: logger
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      ncrf-image: logger
+  template:
+    metadata:
+      labels:
+        ncrf-image: logger
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+        - name: loggen
+          image: registry.kube-system.svc.nokia.net:5555/caas/logger:latest
+          env:
+            - name: STRPS
+              value: "500"
+              # STRPS - strings per second: the number of log lines emitted per second
+            - name: STRLEN
+              value: "160"
+              # STRLEN - the length of each emitted line
+            - name: SPREAD
+              value: "20"
+              # SPREAD - average deviation from STRLEN, 0-100, where 0 means no deviation
+
diff --git a/resources/test_charts/network-attach-test/Chart.yaml b/resources/test_charts/network-attach-test/Chart.yaml
new file mode 100644 (file)
index 0000000..b8ae535
--- /dev/null
@@ -0,0 +1,3 @@
+description: chart for network attach tests
+name: network-attach-test
+version: 1.0.0 
\ No newline at end of file
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod1.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod1.yaml
new file mode 100644 (file)
index 0000000..3dddde5
--- /dev/null
@@ -0,0 +1,18 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod1
+spec:
+  NetworkID: cnet-pod1
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    vxlan: 51
+    container_prefix: ext
+    rt_tables: 100
+    routes:
+      10.0.0.0/32: 10.5.1.1
+    cidr: 10.5.1.0/16
+    allocation_pool:
+      start: 10.5.1.11
+      end: 10.5.1.20
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod2.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod2.yaml
new file mode 100644 (file)
index 0000000..7a68c5c
--- /dev/null
@@ -0,0 +1,12 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod2
+spec:
+  NetworkID: cnet-pod2
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    vxlan: 52
+    container_prefix: ext
+    rt_tables: 100
\ No newline at end of file
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod3.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod3.yaml
new file mode 100644 (file)
index 0000000..3a2b462
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod3
+spec:
+  NetworkID: cnet-pod3
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    vxlan: 53
+    container_prefix: eth1
+    rt_tables: 100
+    cidr: 10.0.0.0/24
+    allocation_pool:
+      start: 10.0.0.1
+      end: 10.0.0.10 
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod4.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod4.yaml
new file mode 100644 (file)
index 0000000..b90759e
--- /dev/null
@@ -0,0 +1,7 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod4
+spec:
+  NetworkID: flannel
+  NetworkType: flannel
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod5.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod5.yaml
new file mode 100644 (file)
index 0000000..bccd16f
--- /dev/null
@@ -0,0 +1,17 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod5
+spec:
+  NetworkID: cnet-pod5
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: eth0
+    rt_tables: 100
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
+    routes:
+      10.10.0.0/32: 10.10.0.40 
\ No newline at end of file
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod6.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod6.yaml
new file mode 100644 (file)
index 0000000..a4bb9d6
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod6
+spec:
+  NetworkID: cnet-pod6
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    vxlan: 56
+    container_prefix: eth1
+    rt_tables: 100
+    cidr: 10.20.0.0/24
+    allocation_pool:
+      start: 10.20.0.1
+      end: 10.20.0.10
\ No newline at end of file
diff --git a/resources/test_charts/network-attach-test/templates/cnet_pod7.yaml b/resources/test_charts/network-attach-test/templates/cnet_pod7.yaml
new file mode 100644 (file)
index 0000000..4633d1f
--- /dev/null
@@ -0,0 +1,16 @@
+apiVersion: danm.k8s.io/v1
+kind: ClusterNetwork
+metadata:
+  name: cnet-pod7
+spec:
+  NetworkID: cnet-pod7
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    vxlan: 57
+    container_prefix: eth2
+    rt_tables: 10
+    cidr: 10.10.0.0/24
+    allocation_pool:
+      start: 10.10.0.1
+      end: 10.10.0.10
\ No newline at end of file
diff --git a/resources/test_charts/persistentvolume-claim/Chart.yaml b/resources/test_charts/persistentvolume-claim/Chart.yaml
new file mode 100644 (file)
index 0000000..617cb83
--- /dev/null
@@ -0,0 +1,3 @@
+name: persistentvolume-claim
+version: 1.1.2
+description: persistentvolume-claim for storage test
diff --git a/resources/test_charts/persistentvolume-claim/templates/persistentvolume-claim.yml b/resources/test_charts/persistentvolume-claim/templates/persistentvolume-claim.yml
new file mode 100644 (file)
index 0000000..a94b18f
--- /dev/null
@@ -0,0 +1,10 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: task-pv-claim
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 1Gi
diff --git a/resources/test_charts/php-apache/Chart.yaml b/resources/test_charts/php-apache/Chart.yaml
new file mode 100644 (file)
index 0000000..95e03aa
--- /dev/null
@@ -0,0 +1,3 @@
+name: php-apache
+version: 1.1.1 
+description: apache server for HPA test
diff --git a/resources/test_charts/php-apache/templates/flannel.yaml b/resources/test_charts/php-apache/templates/flannel.yaml
new file mode 100644 (file)
index 0000000..7409ffa
--- /dev/null
@@ -0,0 +1,9 @@
+---
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: default
+  namespace: default
+spec:
+  NetworkID: flannel
+  NetworkType: flannel
\ No newline at end of file
diff --git a/resources/test_charts/php-apache/templates/php-apache-deployment.yml b/resources/test_charts/php-apache/templates/php-apache-deployment.yml
new file mode 100644 (file)
index 0000000..0153907
--- /dev/null
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: php-apache-deployment
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: php-apache
+  template: 
+    metadata:
+      labels:
+        app: php-apache
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+             {
+                "tenantnetwork":"default"
+             }
+          ]
+    spec:
+      nodeSelector:
+        nodetype: caas_master
+      containers:
+      - name: php-apache
+        image: {{ .Values.registry_url }}:5555/caas/php-apache:latest
+        resources:
+          limits:
+            cpu: "0.2"
+            memory: "64Mi"
+          requests:
+            cpu: "0.2"
+            memory: "64Mi"
+
diff --git a/resources/test_charts/php-apache/templates/php-apache-hpa.yml b/resources/test_charts/php-apache/templates/php-apache-hpa.yml
new file mode 100644 (file)
index 0000000..0a2a65f
--- /dev/null
@@ -0,0 +1,13 @@
+apiVersion: autoscaling/v1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: php-apache-hpa
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: php-apache-deployment
+  minReplicas: 1
+  maxReplicas: 5
+  targetCPUUtilizationPercentage: 50
+
diff --git a/resources/test_charts/php-apache/templates/php-apache-service.yml b/resources/test_charts/php-apache/templates/php-apache-service.yml
new file mode 100644 (file)
index 0000000..5b9cd50
--- /dev/null
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: php-apache-svc
+  labels:
+    app: php-apache
+spec:
+  clusterIP: 10.254.142.142
+  ports:
+  - port: 80
+  selector:
+    app: php-apache
+
diff --git a/resources/test_charts/storage-test-oam/Chart.yaml b/resources/test_charts/storage-test-oam/Chart.yaml
new file mode 100644 (file)
index 0000000..0a8f7d6
--- /dev/null
@@ -0,0 +1,3 @@
+name: storage-test-oam
+version: 1.1.2
+description: deployment for storage test for minimal deployments
diff --git a/resources/test_charts/storage-test-oam/templates/pv-test-deployment.yml b/resources/test_charts/storage-test-oam/templates/pv-test-deployment.yml
new file mode 100644 (file)
index 0000000..68f5468
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pv-test-deployment
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: pv-test
+  template:
+    metadata:
+      labels:
+        app: pv-test
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      volumes:
+      - name: pv-test
+        persistentVolumeClaim:
+          claimName: task-pv-claim
+      containers:
+      - name: pv-test-pod
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        volumeMounts:
+        - mountPath: "/usr/share/storage_test"
+          name: pv-test
diff --git a/resources/test_charts/storage-test-worker/Chart.yaml b/resources/test_charts/storage-test-worker/Chart.yaml
new file mode 100644 (file)
index 0000000..397764a
--- /dev/null
@@ -0,0 +1,3 @@
+name: storage-test-worker
+version: 1.1.3
+description: deployment for storage test for worker deployments
diff --git a/resources/test_charts/storage-test-worker/templates/pv-test-deployment.yml b/resources/test_charts/storage-test-worker/templates/pv-test-deployment.yml
new file mode 100644 (file)
index 0000000..68f5468
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: pv-test-deployment
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: pv-test
+  template:
+    metadata:
+      labels:
+        app: pv-test
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      volumes:
+      - name: pv-test
+        persistentVolumeClaim:
+          claimName: task-pv-claim
+      containers:
+      - name: pv-test-pod
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
+        volumeMounts:
+        - mountPath: "/usr/share/storage_test"
+          name: pv-test
diff --git a/resources/test_charts/su-test/Chart.yaml b/resources/test_charts/su-test/Chart.yaml
new file mode 100644 (file)
index 0000000..e9c285f
--- /dev/null
@@ -0,0 +1,3 @@
+name: su-test
+version: 1.1.1
+description: test chart for software package change custom cbam operation
diff --git a/resources/test_charts/su-test/templates/su-test.yaml b/resources/test_charts/su-test/templates/su-test.yaml
new file mode 100644 (file)
index 0000000..09a9071
--- /dev/null
@@ -0,0 +1,22 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: su-test
+  namespace: kube-system
+spec:
+  replicas: 10
+  selector:
+    matchLabels:
+      app: su-test
+  strategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: su-test
+    spec:
+      containers:
+      - name: busybox
+        image: {{ .Values.registry_url }}:5555/caas/busybox:latest
+        args: ["sleep", "{{ .Values.sleep_time }}"]
+
diff --git a/resources/test_charts/su-test/values.yaml b/resources/test_charts/su-test/values.yaml
new file mode 100644 (file)
index 0000000..d1dc84e
--- /dev/null
@@ -0,0 +1 @@
+sleep_time: "1000"
\ No newline at end of file
diff --git a/resources/test_charts/tenantconfig-test-error/Chart.yaml b/resources/test_charts/tenantconfig-test-error/Chart.yaml
new file mode 100644 (file)
index 0000000..f17bf28
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantconfig-test-error
+version: 1.0.0
+description: test chart for invalid TenantConfig manifests
\ No newline at end of file
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_01.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_01.yaml
new file mode 100644 (file)
index 0000000..5c73711
--- /dev/null
@@ -0,0 +1,7 @@
+# erroneous TenantConfig with empty hostDevices and networkIds
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-01
+hostDevices:
+networkIds:
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_02.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_02.yaml
new file mode 100644 (file)
index 0000000..43f2747
--- /dev/null
@@ -0,0 +1,5 @@
+# erroneous TenantConfig missing both the hostDevices and networkIds section
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-02
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_03.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_03.yaml
new file mode 100644 (file)
index 0000000..e13b931
--- /dev/null
@@ -0,0 +1,8 @@
+# erroneous TenantConfig with one host_device with missing vniRange
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-03
+hostDevices:
+- name: ens4
+  vniType: vlan
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_04.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_04.yaml
new file mode 100644 (file)
index 0000000..eda2886
--- /dev/null
@@ -0,0 +1,8 @@
+# erroneous TenantConfig with one host_device with missing vniType
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-04
+hostDevices:
+- name: ens4
+  vniRange: 1000-2000
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_05.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_05.yaml
new file mode 100644 (file)
index 0000000..82de810
--- /dev/null
@@ -0,0 +1,13 @@
+# erroneous TenantConfig with multiple hostDevices, where one has missing vniRange
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-05
+hostDevices:
+- name: ens4
+  vniType: vlan
+- name: ens5
+  vniType: vlan
+  vniRange: 2000-3000
+networkIds:
+  flannel: flannel
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_06.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_06.yaml
new file mode 100644 (file)
index 0000000..52bed8e
--- /dev/null
@@ -0,0 +1,14 @@
+# erroneous TenantConfig with multiple hostDevices where one is missing vniType
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-06
+hostDevices:
+- name: ens4
+  vniType: vlan
+  vniRange: 1000-2000
+- name: ens5
+  vniRange: 2000-3000
+networkIds:
+  ipvlan:  internal
+  flannel: flannel
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_07.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_07.yaml
new file mode 100644 (file)
index 0000000..9e99b4b
--- /dev/null
@@ -0,0 +1,11 @@
+# erroneous TenantConfig with one host_device, and a malformed networkIds mapping missing the networkType key from the networkType: networkId pair
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-07
+hostDevices:
+- name: ens4
+  vniType: vlan
+  vniRange: 1000-2000
+networkIds:
+  : flannel
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_08.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_08.yaml
new file mode 100644 (file)
index 0000000..57de74b
--- /dev/null
@@ -0,0 +1,11 @@
+# erroneous TenantConfig with one host_device, and a malformed networkIds mapping missing the networkId value from the networkType: networkId pair
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-08
+hostDevices:
+- name: ens4
+  vniType: vlan
+  vniRange: 1000-2000
+networkIds:
+  flannel: 
diff --git a/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_09.yaml b/resources/test_charts/tenantconfig-test-error/templates/tconf_invalid_09.yaml
new file mode 100644 (file)
index 0000000..0ed45c9
--- /dev/null
@@ -0,0 +1,11 @@
+# erroneous TenantConfig with one host_device, and a malformed networkIds mapping where the networkId exceeds the maximum allowed length of 11 characters
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-invalid-09
+hostDevices:
+- name: ens4
+  vniType: vlan
+  vniRange: 1000-2000
+networkIds:
+  ipvlan: twelve-chars
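
The three manifests above (tconf-invalid-07..09) exercise the networkIds mapping rules: each entry needs both a networkType key and a networkId value, and the networkId must fit in 11 characters (the limit the test comments assume). A minimal offline check of those rules, as a Python sketch with a hypothetical validator name:

    def validate_network_ids(network_ids):
        # Hypothetical validator mirroring the rules probed by tconf-invalid-07..09
        for net_type, net_id in (network_ids or {}).items():
            if not net_type or not net_id:
                raise ValueError("networkIds entries need both a networkType and a networkId")
            if len(net_id) > 11:
                raise ValueError("networkId '%s' exceeds 11 characters" % net_id)

    validate_network_ids({"ipvlan": "twelve-chars"})  # raises, like tconf-invalid-09
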
diff --git a/resources/test_charts/tenantconfig-test/Chart.yaml b/resources/test_charts/tenantconfig-test/Chart.yaml
new file mode 100644 (file)
index 0000000..bf9c79c
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantconfig-test
+version: 1.0.0
+description: test chart for valid TenantConfig manifests
\ No newline at end of file
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_01.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_01.yaml
new file mode 100644 (file)
index 0000000..0895974
--- /dev/null
@@ -0,0 +1,7 @@
+# TenantConfig with one host_device, without vniType & vniRange, and no networkType: networkId mapping
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-01
+hostDevices:
+- name: {{ .Values.infra_ext_if }}
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_02.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_02.yaml
new file mode 100644 (file)
index 0000000..8cdc002
--- /dev/null
@@ -0,0 +1,9 @@
+# TenantConfig with one host_device without vniType & vniRange, with networkType: networkId mapping
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-02
+hostDevices:
+- name: {{ .Values.infra_int_if }}
+networkIds:
+  ipvlan: internal
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_03.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_03.yaml
new file mode 100644 (file)
index 0000000..f90eff8
--- /dev/null
@@ -0,0 +1,9 @@
+# TenantConfig with one host_device with vniType & vniRange, without networkType: networkId mapping
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-03
+hostDevices:
+- name: {{ .Values.infra_ext_if }}
+  vniType: vlan
+  vniRange: 1000-2000
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_04.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_04.yaml
new file mode 100644 (file)
index 0000000..ffb5823
--- /dev/null
@@ -0,0 +1,11 @@
+# TenantConfig with one host_device with vniType and vniRange, and with one networkType: networkId mapping
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-04
+hostDevices:
+- name: {{ .Values.infra_int_if }}
+  vniType: vlan
+  vniRange: 1000-2000
+networkIds:
+  flannel: flannel
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_05.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_05.yaml
new file mode 100644 (file)
index 0000000..de5e234
--- /dev/null
@@ -0,0 +1,9 @@
+# TenantConfig with multiple hostDevices without vniType and vniRange, and without networkType: networkId mapping for networkIds
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-05
+hostDevices:
+- name: {{ .Values.infra_int_if }}
+- name: {{ .Values.infra_ext_if }}
+- name: {{ .Values.infra_storage_if }}
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_06.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_06.yaml
new file mode 100644 (file)
index 0000000..4689435
--- /dev/null
@@ -0,0 +1,10 @@
+# TenantConfig with multiple hostDevices without vniType & vniRange, and with networkType: networkId mapping for networkIds
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-06
+hostDevices:
+- name: {{ .Values.infra_ext_if }}
+- name: {{ .Values.infra_storage_if }}
+networkIds:
+  flannel: flannel
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_07.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_07.yaml
new file mode 100644 (file)
index 0000000..9d840ba
--- /dev/null
@@ -0,0 +1,15 @@
+# TenantConfig with multiple hostDevices with/without vniType & vniRange, and no networkType: networkId mapping for networkIds
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-07
+hostDevices:
+- name: {{ .Values.infra_int_if }}
+  vniType: vlan
+  vniRange: 1000-1100
+- name: {{ .Values.infra_ext_if }}
+  vniType: vxlan
+  vniRange: 1000-2000
+- name: {{ .Values.infra_storage_if }}
+  vniType: vlan
+  vniRange: 2000-3000
diff --git a/resources/test_charts/tenantconfig-test/templates/tconf_08.yaml b/resources/test_charts/tenantconfig-test/templates/tconf_08.yaml
new file mode 100644 (file)
index 0000000..53e099e
--- /dev/null
@@ -0,0 +1,16 @@
+# TenantConfig with multiple hostDevices with/without vniType & vniRange, with multiple networkType: networkId mapping for networkIds
+apiVersion: danm.k8s.io/v1
+kind: TenantConfig
+metadata:
+  name: tconf-08
+hostDevices:
+- name: {{ .Values.infra_int_if }}
+  vniType: vlan
+  vniRange: 1000-2000
+- name: {{ .Values.infra_ext_if }}
+- name: {{ .Values.infra_storage_if }}
+  vniType: vlan
+  vniRange: 2000-3000
+networkIds:
+  ipvlan:  internal
+  flannel: flannel
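
tconf-invalid-03..06 earlier in this chart pair probe the complementary hostDevices rule: vniType and vniRange must be set together. A sketch of the same check run over a pre-rendered template (PyYAML and the rendered file name are assumptions):

    import yaml  # PyYAML, assumed available

    def validate_host_devices(manifest):
        for dev in manifest.get("hostDevices") or []:
            # vniType without vniRange (or vice versa) is what tconf-invalid-03..06 reject
            if ("vniType" in dev) != ("vniRange" in dev):
                raise ValueError("host device %s sets only one of vniType/vniRange" % dev.get("name"))

    with open("tconf_08.rendered.yaml") as f:  # hypothetical rendered output of the template above
        validate_host_devices(yaml.safe_load(f))
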
diff --git a/resources/test_charts/tenantnetwork-attach-pod1/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod1/Chart.yaml
new file mode 100644 (file)
index 0000000..98c6826
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod1
+version: 1.0.0
+description: pods for TenantNetwork tests; static ip allocation
diff --git a/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_01.yaml b/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_01.yaml
new file mode 100644 (file)
index 0000000..461d73d
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-01-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.1/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_02.yaml b/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_02.yaml
new file mode 100644 (file)
index 0000000..32c87c7
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-01-2
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.8/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_03.yaml b/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_03.yaml
new file mode 100644 (file)
index 0000000..9a0f43d
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-01-3
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.9/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_04.yaml b/resources/test_charts/tenantnetwork-attach-pod1/templates/tennet_pod_01_04.yaml
new file mode 100644 (file)
index 0000000..6065430
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-01-4
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.254/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod10/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod10/Chart.yaml
new file mode 100644 (file)
index 0000000..9b5a890
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod10
+version: 1.0.0
+description: pods for TenantNetwork tests; attach with "none" ip allocation
diff --git a/resources/test_charts/tenantnetwork-attach-pod10/templates/tennet_pod_10.yaml b/resources/test_charts/tenantnetwork-attach-pod10/templates/tennet_pod_10.yaml
new file mode 100644 (file)
index 0000000..3b48b8f
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-10
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"none"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod11/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod11/Chart.yaml
new file mode 100644 (file)
index 0000000..240aac6
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod11
+version: 1.0.0
+description: pods for TenantNetwork tests; attach to multiple TenantNetworks, one of which does not exist
diff --git a/resources/test_charts/tenantnetwork-attach-pod11/templates/tennet_pod_11.yaml b/resources/test_charts/tenantnetwork-attach-pod11/templates/tennet_pod_11.yaml
new file mode 100644 (file)
index 0000000..5a75dec
--- /dev/null
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-11
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-05", "ip":"10.20.5.101/24"
+            },
+            {
+              "tenantNetwork":"dummy", "ip":"dynamic"
+            },
+            {
+              "tenantNetwork":"tennet-attach-06", "ip":"none"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod12/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod12/Chart.yaml
new file mode 100644 (file)
index 0000000..d23579b
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod12
+version: 1.0.0
+description: pods for TenantNetwork tests; attach to three TenantNetworks with static, dynamic, and none ip allocation
diff --git a/resources/test_charts/tenantnetwork-attach-pod12/templates/tennet_pod_12.yaml b/resources/test_charts/tenantnetwork-attach-pod12/templates/tennet_pod_12.yaml
new file mode 100644 (file)
index 0000000..d39e7e1
--- /dev/null
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-12
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-05", "ip":"10.20.5.101/24"
+            },
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"dynamic"
+            },
+            {
+              "tenantNetwork":"tennet-attach-06", "ip":"none"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod13/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod13/Chart.yaml
new file mode 100644 (file)
index 0000000..07d42ee
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod13
+version: 1.0.0
+description: pods for TenantNetwork tests; attach to multiple networks, one of which does not exist
diff --git a/resources/test_charts/tenantnetwork-attach-pod13/templates/tennet_pod_13.yaml b/resources/test_charts/tenantnetwork-attach-pod13/templates/tennet_pod_13.yaml
new file mode 100644 (file)
index 0000000..458c4ef
--- /dev/null
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-13
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-06", "ip":"10.20.6.10/24"
+            },
+            {
+              "tenantNetwork":"zsafol", "ip":"dynamic"
+            },
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"none"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod14/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod14/Chart.yaml
new file mode 100644 (file)
index 0000000..df9bc8f
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod14
+version: 1.0.0
+description: pods for TenantNetwork tests; attach to three TenantNetworks with static and dynamic ip allocation
diff --git a/resources/test_charts/tenantnetwork-attach-pod14/templates/tennet_pod_14.yaml b/resources/test_charts/tenantnetwork-attach-pod14/templates/tennet_pod_14.yaml
new file mode 100644 (file)
index 0000000..e721458
--- /dev/null
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-14
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-06", "ip":"10.20.6.10/24"
+            },
+            {
+              "tenantNetwork":"tennet-attach-05", "ip":"dynamic"
+            },
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.5/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod2/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod2/Chart.yaml
new file mode 100644 (file)
index 0000000..df2d125
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod2
+version: 1.0.0
+description: pods for TenantNetwork tests; dynamic ip allocation from a mostly exhausted pool; deployable on its own, but 4 of the 10 replicas fail during testing due to ip shortage
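
The replica arithmetic behind that description, assuming dynamic allocation draws only from the tennet-attach-01 allocation_pool defined later in this commit (10.240.1.1-10.240.1.9) and that the pod1 deployments are already running:

    pool_size = 9          # 10.240.1.1 .. 10.240.1.9
    taken_statically = 3   # .1, .8, .9 are held by the pod1 deployments; .254 lies outside the pool
    replicas = 10
    assert replicas - (pool_size - taken_statically) == 4  # the 4 replicas that fail
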
diff --git a/resources/test_charts/tenantnetwork-attach-pod2/templates/tennet_pod_02.yaml b/resources/test_charts/tenantnetwork-attach-pod2/templates/tennet_pod_02.yaml
new file mode 100644 (file)
index 0000000..b3c2657
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-02
+  namespace: default
+spec:
+  replicas: 10
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod3/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod3/Chart.yaml
new file mode 100644 (file)
index 0000000..a8e9246
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod3
+version: 1.0.0
+description: pods for TenantNetwork tests; static allocation of already taken IPs; expected to fail
diff --git a/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_01.yaml b/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_01.yaml
new file mode 100644 (file)
index 0000000..2dcba57
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-03-1
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.1/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_02.yaml b/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_02.yaml
new file mode 100644 (file)
index 0000000..cd26678
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-03-2
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.8/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_03.yaml b/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_03.yaml
new file mode 100644 (file)
index 0000000..3bfcfb1
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-03-3
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.9/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_04.yaml b/resources/test_charts/tenantnetwork-attach-pod3/templates/tennet_pod_03_04.yaml
new file mode 100644 (file)
index 0000000..9b990c7
--- /dev/null
@@ -0,0 +1,30 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-03-4
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-01", "ip":"10.240.1.254/24"
+            }
+          ]
+    spec:
+      nodeSelector:
+        nodename: caas_master1
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod4/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod4/Chart.yaml
new file mode 100644 (file)
index 0000000..32df30d
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod4
+version: 1.0.0
+description: pods for TenantNetwork tests; dynamic ip allocation for non-default namespaced Deployment+TenantNetwork
diff --git a/resources/test_charts/tenantnetwork-attach-pod4/templates/tennet_pod_04.yaml b/resources/test_charts/tenantnetwork-attach-pod4/templates/tennet_pod_04.yaml
new file mode 100644 (file)
index 0000000..d8d1116
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-04
+  namespace: kube-system
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-02", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod5/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod5/Chart.yaml
new file mode 100644 (file)
index 0000000..882878f
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod5
+version: 1.0.0
+description: pods for TenantNetwork tests; ip allocation from outside the CIDR; expected to fail
diff --git a/resources/test_charts/tenantnetwork-attach-pod5/templates/tennet_pod_05.yaml b/resources/test_charts/tenantnetwork-attach-pod5/templates/tennet_pod_05.yaml
new file mode 100644 (file)
index 0000000..f9a9c09
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-05
+  namespace: kube-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-02", "ip":"10.240.3.1"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod6/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod6/Chart.yaml
new file mode 100644 (file)
index 0000000..38bca85
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod6
+version: 1.0.0
+description: pods for TenantNetwork tests; no cidr and no allocation_pool, so both static and dynamic ip allocation fail
diff --git a/resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_01.yaml b/resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_01.yaml
new file mode 100644 (file)
index 0000000..87ca048
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-06-01
+  namespace: default
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-07", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_02.yaml b/resources/test_charts/tenantnetwork-attach-pod6/templates/tennet_pod_06_02.yaml
new file mode 100644 (file)
index 0000000..dbe7702
--- /dev/null
@@ -0,0 +1,28 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-06-02
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-07", "ip":"10.240.6.6"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod7/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod7/Chart.yaml
new file mode 100644 (file)
index 0000000..db8c125
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod7
+version: 1.0.0
+description: pods for TenantNetwork tests; multiple TenantNetworks (flannel + ipvlan)
diff --git a/resources/test_charts/tenantnetwork-attach-pod7/templates/tennet_pod_07.yaml b/resources/test_charts/tenantnetwork-attach-pod7/templates/tennet_pod_07.yaml
new file mode 100644 (file)
index 0000000..8cb6769
--- /dev/null
@@ -0,0 +1,31 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-07
+  namespace: default
+spec:
+  replicas: 5
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-04", "ip":"dynamic"
+            },
+            {
+              "tenantNetwork":"tennet-attach-03", "ip":"dynamic", "ip6":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "--"]
+        args: ["sleep", "6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod8/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod8/Chart.yaml
new file mode 100644 (file)
index 0000000..9828f0f
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod8
+version: 1.0.0
+description: pods for TenantNetwork tests; check that a Service is reachable over flannel (static ip inside the flannel cidr)
diff --git a/resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08.yaml b/resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08.yaml
new file mode 100644 (file)
index 0000000..3fc83ec
--- /dev/null
@@ -0,0 +1,29 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-08
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        name: tennet-pod-08
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-04", "ip":"10.244.100.100/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "sh", "-c", "--"]
+        args: ["echo -e 'HTTP/1.0 200 OK \n\nOK'>/tmp/temp; nc -l -p 4242 < /tmp/temp; sleep 6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08_service.yaml b/resources/test_charts/tenantnetwork-attach-pod8/templates/tennet_pod_08_service.yaml
new file mode 100644 (file)
index 0000000..42bf0b8
--- /dev/null
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: tennet-pod-08
+  name: tennet-pod-08
+  namespace: default
+spec:
+  ports:
+  - name: tennet-pod-08
+    port: 4242
+    protocol: TCP
+  selector:
+    name: tennet-pod-08
diff --git a/resources/test_charts/tenantnetwork-attach-pod9/Chart.yaml b/resources/test_charts/tenantnetwork-attach-pod9/Chart.yaml
new file mode 100644 (file)
index 0000000..78a7ad4
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-pod9
+version: 1.0.0
+description: pods for TenantNetwork tests; flannel with a static ip request outside the flannel CIDR (which flannel ignores) and dynamic allocation
diff --git a/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_01.yaml b/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_01.yaml
new file mode 100644 (file)
index 0000000..e42dd11
--- /dev/null
@@ -0,0 +1,29 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-09-01
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        name: tennet-pod-09
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-04", "ip":"10.0.0.1/24"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "sh", "-c", "--"]
+        args: ["echo -e 'HTTP/1.0 200 OK \n\nOK'>/tmp/temp; nc -l -p 4141 < /tmp/temp; sleep 6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_02.yaml b/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_02.yaml
new file mode 100644 (file)
index 0000000..9c20aa2
--- /dev/null
@@ -0,0 +1,29 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: tennet-pod-09-02
+  namespace: default
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: alpine
+  template:
+    metadata:
+      labels:
+        name: tennet-pod-09
+        app: alpine
+      annotations:
+        danm.k8s.io/interfaces: |
+          [
+            {
+              "tenantNetwork":"tennet-attach-04", "ip":"dynamic"
+            }
+          ]
+    spec:
+      containers:
+      - name: alpine
+        image: {{ .Values.registry_url }}:5555/caas/alpine_test:latest
+        imagePullPolicy: IfNotPresent
+        command: ["/usr/bin/dumb-init", "-c", "sh", "-c", "--"]
+        args: ["echo -e 'HTTP/1.0 200 OK \n\nOK'>/tmp/temp; nc -l -p 4141 < /tmp/temp; sleep 6000"]
diff --git a/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_service.yaml b/resources/test_charts/tenantnetwork-attach-pod9/templates/tennet_pod_09_service.yaml
new file mode 100644 (file)
index 0000000..7168836
--- /dev/null
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    name: tennet-pod-09
+  name: tennet-pod-09
+  namespace: default
+spec:
+  ports:
+  - name: tennet-pod-09
+    port: 4141
+    protocol: TCP
+  selector:
+    name: tennet-pod-09
diff --git a/resources/test_charts/tenantnetwork-attach-test/Chart.yaml b/resources/test_charts/tenantnetwork-attach-test/Chart.yaml
new file mode 100644 (file)
index 0000000..0d1fa75
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-attach-test
+version: 1.0.0
+description: tenantnetwork manifests for testing network attach
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_01.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_01.yaml
new file mode 100644 (file)
index 0000000..71efaaf
--- /dev/null
@@ -0,0 +1,19 @@
+# valid TenantNetwork manifest with valid ipv4 CIDR notation, an ipv4 route, and an allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-01
+  namespace: default
+spec:
+  NetworkID: tnet01
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: tnet_
+    rt_tables: 100
+    cidr: 10.240.1.0/24
+    routes:
+      10.10.1.0/24: 10.240.1.100
+    allocation_pool:
+      start: 10.240.1.1
+      end: 10.240.1.9
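
A quick consistency check of the manifest above: the allocation_pool must lie inside the cidr and be ordered, which is exactly what the tennet-invalid-06 and tennet-invalid-07-* manifests later in this commit violate. A sketch using the standard ipaddress module:

    import ipaddress

    cidr = ipaddress.ip_network(u"10.240.1.0/24")
    start = ipaddress.ip_address(u"10.240.1.1")
    end = ipaddress.ip_address(u"10.240.1.9")
    assert start in cidr and end in cidr and start <= end
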
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_02.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_02.yaml
new file mode 100644 (file)
index 0000000..73d7018
--- /dev/null
@@ -0,0 +1,19 @@
+# valid TenantNetwork manifest with valid ipv4 CIDR notation, a valid route, and an allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-02
+  namespace: kube-system
+spec:
+  NetworkID: tnet02
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.2.0/24
+    allocation_pool:
+      start: 10.240.2.2
+      end: 10.240.2.6
+    routes:
+      10.10.2.0/24: 10.240.2.1
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_03.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_03.yaml
new file mode 100644 (file)
index 0000000..1022001
--- /dev/null
@@ -0,0 +1,14 @@
+# valid TenantNetwork manifest with both ipv4 and ipv6 CIDR notation without any routes and no allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-03
+  namespace: default
+spec:
+  NetworkID: tnet03
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: tnet03
+    cidr: 10.240.3.0/24
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_04.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_04.yaml
new file mode 100644 (file)
index 0000000..d68d9a5
--- /dev/null
@@ -0,0 +1,9 @@
+# valid TenantNetwork manifest for the flannel NetworkType; flannel handles ip allocation itself, so no Options are set
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-04
+  namespace: default
+spec:
+  NetworkID: flannel
+  NetworkType: flannel
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_05.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_05.yaml
new file mode 100644 (file)
index 0000000..19d0221
--- /dev/null
@@ -0,0 +1,18 @@
+# valid TenantNetwork manifest with valid ipv4 and ipv6 notation, and with allocation_pool; no routes
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-05
+  namespace: default
+spec:
+  NetworkID: tnet05
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: tnet5_
+    rt_tables: 200
+    cidr: 10.20.5.0/24
+    allocation_pool:
+      start: 10.20.5.100
+      end: 10.20.5.200
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_06.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_06.yaml
new file mode 100644 (file)
index 0000000..85d37a3
--- /dev/null
@@ -0,0 +1,18 @@
+# valid TenantNetwork manifest with valid ipv4 and ipv6 notation, and with allocation_pool; no routes
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-06
+  namespace: default
+spec:
+  NetworkID: tnet06
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.20.6.0/24
+    allocation_pool:
+      start: 10.20.6.1
+      end: 10.20.6.10
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_07.yaml b/resources/test_charts/tenantnetwork-attach-test/templates/tennet_attach_07.yaml
new file mode 100644 (file)
index 0000000..ebd951b
--- /dev/null
@@ -0,0 +1,13 @@
+# valid TenantNetwork manifest without cidr, net6, or allocation_pool, so no ip can be allocated from it
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-attach-07
+  namespace: default
+spec:
+  NetworkID: tnet07
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
diff --git a/resources/test_charts/tenantnetwork-test-error/Chart.yaml b/resources/test_charts/tenantnetwork-test-error/Chart.yaml
new file mode 100644 (file)
index 0000000..1695b1e
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-test-error
+version: 1.0.0
+description: test chart for invalid TenantNetwork manifests
\ No newline at end of file
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_01.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_01.yaml
new file mode 100644 (file)
index 0000000..11e70f4
--- /dev/null
@@ -0,0 +1,14 @@
+# invalid TenantNetwork manifest with incorrect ipv4 CIDR notation
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-01
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.256.1.0/24
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_02.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_02.yaml
new file mode 100644 (file)
index 0000000..537b458
--- /dev/null
@@ -0,0 +1,14 @@
+# invalid TenantNetwork manifest with incorrect ipv6 CIDR notation
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-02
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    net6: ffff::ff::2::/64
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_01.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_01.yaml
new file mode 100644 (file)
index 0000000..a07fcf3
--- /dev/null
@@ -0,0 +1,16 @@
+# invalid TenantNetwork manifest with invalid ipv4 route
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-03-01
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    routes:
+      10.240.1.0/24: 10.240.1.0.255
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_02.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_03_02.yaml
new file mode 100644 (file)
index 0000000..39c91fb
--- /dev/null
@@ -0,0 +1,16 @@
+# invalid TenantNetwork manifest with invalid ipv4 route
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-03-02
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    routes:
+      10.240.1.0/24: 10.240.2.254
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_01.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_01.yaml
new file mode 100644 (file)
index 0000000..ed115e5
--- /dev/null
@@ -0,0 +1,16 @@
+# invalid TenantNetwork manifest with incorrect ipv6 route
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-04-01
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    container_prefix: int
+    rt_tables: 200
+    net6: 2001:db8::/45
+    routes6:
+      2001:db8::/45: 2001:db8::1::2
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_02.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_04_02.yaml
new file mode 100644 (file)
index 0000000..02e4446
--- /dev/null
@@ -0,0 +1,16 @@
+# invalid TenantNetwork manifest with incorrect ipv6 route
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-04-02
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    container_prefix: int
+    rt_tables: 200
+    net6: 2001:db8::/45
+    routes6:
+      1abc:db8::/45: 1002:ac81:1::1
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_05.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_05.yaml
new file mode 100644 (file)
index 0000000..c0a9368
--- /dev/null
@@ -0,0 +1,14 @@
+# invalid TenantNetwork manifest with manually defined Alloc
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-05
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    Alloc: gAAAAAAAAAAAAAAAAAAAAAAA=
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_06.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_06.yaml
new file mode 100644 (file)
index 0000000..bc1301a
--- /dev/null
@@ -0,0 +1,16 @@
+# invalid TenantNetwork manifest with allocation_pool defined without ipv4 CIDR
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-06
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens4
+    container_prefix: int
+    rt_tables: 200
+    allocation_pool:
+      start: 10.240.1.100
+      end: 10.240.1.200
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_01.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_01.yaml
new file mode 100644 (file)
index 0000000..7f66241
--- /dev/null
@@ -0,0 +1,17 @@
+# invalid TenantNetwork manifest where allocation_pool.end is defined to be smaller than allocation_pool.start
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-07-01
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.1.101
+      end: 10.240.1.100
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_02.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_07_02.yaml
new file mode 100644 (file)
index 0000000..211e8bf
--- /dev/null
@@ -0,0 +1,17 @@
+# invalid TenantNetwork manifest where allocation_pool.end is defined to be smaller than allocation_pool.start
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-07-02
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.0.254
+      end: 10.240.2.1
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_08.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_08.yaml
new file mode 100644 (file)
index 0000000..fb98e61
--- /dev/null
@@ -0,0 +1,17 @@
+# invalid TenantNetwork manifest with NetworkID that is longer than 11 characters
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-08
+  namespace: default
+spec:
+  NetworkID: twelve-chars
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.1.100
+      end: 10.240.1.200
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_09.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_09.yaml
new file mode 100644 (file)
index 0000000..efcffba
--- /dev/null
@@ -0,0 +1,21 @@
+# invalid TenantNetwork manifest with AllowedTenants defined
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-09
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  AllowedTenants:
+    - default
+    - kube-system
+    - kube-public
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.1.100
+      end: 10.240.1.200
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_10.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_10.yaml
new file mode 100644 (file)
index 0000000..6a531f4
--- /dev/null
@@ -0,0 +1,18 @@
+# invalid TenantNetwork manifest with vlan provided
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-10
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    vlan: 700
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.1.100
+      end: 10.240.1.200
diff --git a/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_11.yaml b/resources/test_charts/tenantnetwork-test-error/templates/tennet_invalid_11.yaml
new file mode 100644 (file)
index 0000000..0840a2b
--- /dev/null
@@ -0,0 +1,18 @@
+# invalid TenantNetwork manifest with vxlan defined
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-invalid-11
+  namespace: default
+spec:
+  NetworkID: internal
+  NetworkType: ipvlan
+  Options:
+    host_device: ens3
+    container_prefix: int
+    rt_tables: 200
+    vxlan: 1000
+    cidr: 10.240.1.0/24
+    allocation_pool:
+      start: 10.240.1.100
+      end: 10.240.1.200
diff --git a/resources/test_charts/tenantnetwork-test/Chart.yaml b/resources/test_charts/tenantnetwork-test/Chart.yaml
new file mode 100644 (file)
index 0000000..af373b5
--- /dev/null
@@ -0,0 +1,3 @@
+name: tenantnetwork-test
+version: 1.0.0
+description: test chart for valid TenantNetwork manifests
\ No newline at end of file
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_01.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_01.yaml
new file mode 100644 (file)
index 0000000..e2eee32
--- /dev/null
@@ -0,0 +1,12 @@
+# valid TenantNetwork manifest with valid ipv4 CIDR notation, no routes, no allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-01
+  namespace: default
+spec:
+  NetworkID: tnet01
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_ext_if }}
+    cidr: 10.240.1.0/24
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_02.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_02.yaml
new file mode 100644 (file)
index 0000000..18dbc79
--- /dev/null
@@ -0,0 +1,16 @@
+# valid TenantNetwork manifest with valid ipv4 CIDR notation, valid route, and no allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-02
+  namespace: default
+spec:
+  NetworkID: tnet02
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.2.0/24
+    routes:
+      10.10.1.0/24: 10.240.2.1
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_03.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_03.yaml
new file mode 100644 (file)
index 0000000..f120f4a
--- /dev/null
@@ -0,0 +1,14 @@
+# valid TenantNetwork manifest with both ipv4 and ipv6 CIDR notation without any routes and no allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-03
+  namespace: default
+spec:
+  NetworkID: tnet03
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    cidr: 10.240.3.0/24
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_04.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_04.yaml
new file mode 100644 (file)
index 0000000..a0a06e3
--- /dev/null
@@ -0,0 +1,18 @@
+# valid TenantNetwork manifest with valid ipv4 and ipv6 CIDR notation, and with valid routes for both ipv4 and ipv6; no allocation_pool
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-04
+  namespace: default
+spec:
+  NetworkID: tnet04
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_storage_if }}
+    container_prefix: int
+    cidr: 10.240.4.0/24
+    net6: 2001:db8::/45
+    routes:
+        10.40.1.0/24: 10.240.4.254
+    routes6:
+        2001:db9::/45: 2001:db8:4::1
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_05.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_05.yaml
new file mode 100644 (file)
index 0000000..7e1be7a
--- /dev/null
@@ -0,0 +1,18 @@
+# valid TenantNetwork manifest with valid ipv4 and ipv6 notation, and with allocation_pool; no routes
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-05
+  namespace: default
+spec:
+  NetworkID: tnet05
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.5.0/24
+    allocation_pool:
+      start: 10.240.5.100
+      end: 10.240.5.200
+    net6: 2001:db8::/45
diff --git a/resources/test_charts/tenantnetwork-test/templates/tennet_06.yaml b/resources/test_charts/tenantnetwork-test/templates/tennet_06.yaml
new file mode 100644 (file)
index 0000000..72ba689
--- /dev/null
@@ -0,0 +1,22 @@
+# valid TenantNetwork manifest with valid ipv4 and ipv6 notation, and with allocation_pool; with ipv4 and ipv6 routes
+apiVersion: danm.k8s.io/v1
+kind: TenantNetwork
+metadata:
+  name: tennet-06
+  namespace: default
+spec:
+  NetworkID: tnet06
+  NetworkType: ipvlan
+  Options:
+    host_device: {{ .Values.infra_int_if }}
+    container_prefix: int
+    rt_tables: 200
+    cidr: 10.240.6.0/24
+    allocation_pool:
+      start: 10.240.6.100
+      end: 10.240.6.200
+    net6: 2001:db8::/45
+    routes:
+        10.60.1.0/24: 10.240.6.254
+    routes6:
+        2001:db8::/45: 2001:db8:1::1
\ No newline at end of file
diff --git a/resources/test_containers/alpine_test/Dockerfile b/resources/test_containers/alpine_test/Dockerfile
new file mode 100644 (file)
index 0000000..f507b85
--- /dev/null
@@ -0,0 +1,6 @@
+FROM alpine:3.7
+
+RUN apk update \
+ && apk upgrade \
+ && apk add dumb-init \
+ && rm -rf /var/cache/apk
diff --git a/resources/test_containers/busybox/Dockerfile b/resources/test_containers/busybox/Dockerfile
new file mode 100644 (file)
index 0000000..f80992b
--- /dev/null
@@ -0,0 +1 @@
+FROM busybox:1.28.4
diff --git a/resources/test_containers/http-traffic-gen/Dockerfile b/resources/test_containers/http-traffic-gen/Dockerfile
new file mode 100644 (file)
index 0000000..16c7add
--- /dev/null
@@ -0,0 +1,10 @@
+FROM alpine:3.7
+LABEL maintainer="Tamas Kopcso <tamas.kopcso@nokia.com>"
+
+RUN apk update \
+ && apk add python \
+ && apk add py-pip \
+ && pip install requests
+RUN mkdir /etc/http_traffic_gen
+COPY http_traffic_gen.py /etc/http_traffic_gen/http_traffic_gen.py
+CMD ["python", "/etc/http_traffic_gen/http_traffic_gen.py"]
diff --git a/resources/test_containers/http-traffic-gen/http_traffic_gen.py b/resources/test_containers/http-traffic-gen/http_traffic_gen.py
new file mode 100644 (file)
index 0000000..df19fa5
--- /dev/null
@@ -0,0 +1,26 @@
+#!/usr/bin/python
+
+import thread
+import time
+import traceback
+import requests
+
+
+URL = "http://podinfo.kube-system.svc.rec.io:9898"
+LOCATION = "nokia"
+PARAMS = {'address': LOCATION}
+DELTA = 0.01
+
+
+def worker(thread_name):  # start_new_thread passes the thread name as an argument
+    for _ in range(0, 1000):
+        time.sleep(DELTA)
+        requests.get(url=URL, params=PARAMS)
+
+try:
+    for x in range(0, 3):
+        thread.start_new_thread(worker, ("Thread-" + str(x), ))
+except Exception:
+    traceback.print_exc()
+
+time.sleep(40)
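
The thread module above exists only in Python 2, which is why the Dockerfile installs the python (2.x) apk package; a Python 3 port of the same generator would use threading — a sketch, not part of this commit:

    import threading
    import time

    import requests

    def worker():
        # fire 1000 GET requests, pausing 10 ms between them
        for _ in range(1000):
            time.sleep(0.01)
            requests.get("http://podinfo.kube-system.svc.rec.io:9898",
                         params={"address": "nokia"})

    threads = [threading.Thread(target=worker) for _ in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
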
diff --git a/resources/test_containers/logger/Dockerfile b/resources/test_containers/logger/Dockerfile
new file mode 100644 (file)
index 0000000..59fb44f
--- /dev/null
@@ -0,0 +1,9 @@
+FROM python:3.6-alpine
+
+ENV STRPS 20
+ENV STRLEN 50
+ENV SPREAD 20
+
+COPY ./textgen.py ./
+
+CMD ["python3","textgen.py"]
diff --git a/resources/test_containers/logger/textgen.py b/resources/test_containers/logger/textgen.py
new file mode 100644 (file)
index 0000000..1d96164
--- /dev/null
@@ -0,0 +1,27 @@
+import random
+import string
+import os
+import time
+
+# Configure script based on environment variables
+PS = int(os.environ.get("STRPS", 10))
+LEN = int(os.environ.get("STRLEN", 200))
+SPREAD = int(os.environ.get("SPREAD", 10))
+
+i = 0
+T = time.time()
+RATE = PS
+TPS = PS
+
+while True:
+    GENLEN = int((LEN-13)*(1-((random.randint(0, SPREAD*2)-SPREAD)/200)))  # jitter length by up to +/- SPREAD/2 percent
+    print("Rate=", RATE, ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits +
+                                                "   ") for _ in range(GENLEN)))
+    time.sleep(1 / TPS)
+    i = i+1
+    if i >= PS / 2:
+        i = 0
+        t2 = time.time()
+        RATE = round(((PS / 2) / (t2 - T)), 2)
+        T = t2
+        TPS = TPS*(PS/RATE)
diff --git a/resources/test_containers/php-apache/Dockerfile b/resources/test_containers/php-apache/Dockerfile
new file mode 100644 (file)
index 0000000..3a10a75
--- /dev/null
@@ -0,0 +1,3 @@
+FROM php:5.6.36-apache-stretch
+ADD index.php /var/www/html/index.php
+RUN chmod a+rx index.php
diff --git a/resources/test_containers/php-apache/index.php b/resources/test_containers/php-apache/index.php
new file mode 100644 (file)
index 0000000..3fe2edc
--- /dev/null
@@ -0,0 +1,7 @@
+<?php
+  $x = 0.0001;
+  for ($i = 0; $i <= 1000000; $i++) {
+    $x += sqrt($x);
+  }
+  echo "OK!";
+?>
diff --git a/resources/test_containers/podinfo/Dockerfile b/resources/test_containers/podinfo/Dockerfile
new file mode 100644 (file)
index 0000000..7321859
--- /dev/null
@@ -0,0 +1 @@
+FROM stefanprodan/podinfo:1.4.2
diff --git a/testcases/HPA_check/Custom_HPA_check.py b/testcases/HPA_check/Custom_HPA_check.py
new file mode 100644 (file)
index 0000000..7da09bc
--- /dev/null
@@ -0,0 +1,67 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+import common_utils  # noqa
+from test_constants import *  # noqa
+
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+
+
+def Custom_HPA_check():
+    steps = ['step1_check_initial_replica_count_custom',
+             'step2_check_scale_out_custom',
+             'step3_check_scale_in_custom']
+    BuiltIn().run_keyword("Custom_HPA_check.setup")
+    common_utils.keyword_runner(steps)
+
+
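+# Column 6 of 'kubectl get hpa' output is the current replica count (column layout depends on the kubectl version).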
+GET_POD_REPLICA_COUNT = "kubectl get hpa --namespace=kube-system | grep podinfo | awk '{print $6}'"
+
+
+def setup():
+    # flags = ["--horizontal-pod-autoscaler-downscale-stabilization=10s", "--horizontal-pod-autoscaler-sync-period=10s"]
+    # common_utils.modify_static_pod_config(common_utils.add_flag_to_command, "cm.yml", flags)
+    common_utils.helm_install(chart_name="default/custom-metrics", release_name="podinfo",
+                              values="registry_url={reg_url}".format(reg_url=reg))
+    common_utils.check_kubernetes_object(kube_object=podinfo_pod,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Running",
+                                         timeout=90)
+
+
+def step1_check_initial_replica_count_custom():
+    expected_initial_replica_num = 2
+    timeout = 1000
+    check_scaling(expected_initial_replica_num, timeout)
+
+
+def step2_check_scale_out_custom():
+    common_utils.helm_install(chart_name="default/http-traffic-gen", release_name="http-traffic-gen",
+                              values="registry_url={reg_url}".format(reg_url=reg))
+    common_utils.check_kubernetes_object(kube_object=http_traffic_gen,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Running",
+                                         timeout=45)
+    expected_replicas = 3
+    timeout = 1000
+    check_scaling(expected_replicas, timeout)
+
+
+def step3_check_scale_in_custom():
+    expected_replicas = 2
+    timeout = 1000
+    check_scaling(expected_replicas, timeout)
+
+
+@robot_log
+def check_scaling(expected_replicas, timeout=60):
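+    # Poll the HPA replica count once per second until it matches or the timeout expires.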
+    for _ in range(timeout):
+        BuiltIn().sleep('1s')
+        actual_replicas = int(execute.execute_unix_command(GET_POD_REPLICA_COUNT))
+        if actual_replicas == expected_replicas:
+            break
+    BuiltIn().should_be_equal(actual_replicas, expected_replicas)
diff --git a/testcases/HPA_check/HPA_check.py b/testcases/HPA_check/HPA_check.py
new file mode 100644 (file)
index 0000000..d6ab1e3
--- /dev/null
@@ -0,0 +1,70 @@
+import sys
+import os
+import time
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from datetime import datetime  # noqa
+from datetime import timedelta  # noqa
+from robot.libraries.BuiltIn import BuiltIn  # noqa
+from robot.api import logger  # noqa
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+
+
+def HPA_check():
+    steps = ['step1_check_initial_replica_count',
+             'step2_check_scale_out',
+             'step3_check_scale_in']
+    BuiltIn().run_keyword("HPA_check.setup")
+    common_utils.keyword_runner(steps)
+
+
+def setup():
+    common_utils.helm_install(chart_name="default/php-apache", release_name="crf01",
+                              values="registry_url={reg_url}".format(reg_url=reg))
+    common_utils.check_kubernetes_object(kube_object=php_apache_pod,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Running",
+                                         timeout=90)
+    flags = ["--horizontal-pod-autoscaler-downscale-stabilization=10s", "--horizontal-pod-autoscaler-sync-period=10s"]
+    common_utils.modify_static_pod_config(common_utils.add_flag_to_command, "cm.yml", flags)
+    common_utils.helm_install(chart_name="default/load-generator-for-apache", release_name="load")
+    common_utils.check_kubernetes_object(kube_object=load_generator_for_apache,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Running",
+                                         timeout=60)
+
+
+def step1_check_initial_replica_count():
+    time.sleep(5)
+    replica_count = int(
+        execute.execute_unix_command("kubectl get hpa | grep php-apache-hpa | awk '{print $6}'"))
+    if replica_count == 1:
+        logger.info("number of php-apache pods is 1")
+    else:
+        raise Exception("Expected initial replica count is not correct: expected: 1, got: " + str(replica_count))
+
+
+def step2_check_scale_out():
+    check_scaling(expected_replicas="2", timeout=360)
+
+
+def step3_check_scale_in():
+    check_scaling(expected_replicas="1", timeout=480)
+
+
+def check_scaling(expected_replicas, timeout):
+    # Poll the HPA replica count every 5 seconds until it matches or the timeout expires.
+    wait_until = datetime.now() + timedelta(seconds=timeout)
+    actual_replicas = execute.execute_unix_command("kubectl get hpa | grep php-apache-hpa | awk '{print $6}'")
+    while actual_replicas != expected_replicas:
+        time.sleep(5)
+        actual_replicas = execute.execute_unix_command("kubectl get hpa | grep php-apache-hpa | awk '{print $6}'")
+        if actual_replicas == expected_replicas:
+            logger.info("number of php-apache pods is " + expected_replicas + ", scaling was successful")
+        elif wait_until < datetime.now():
+            raise Exception("Scaling did not happen in " + str(timeout) + " seconds, expected replica count is " +
+                            expected_replicas + ", got " + actual_replicas)
diff --git a/testcases/basic_func_tests/tc_002_pod_health_check.py b/testcases/basic_func_tests/tc_002_pod_health_check.py
new file mode 100644 (file)
index 0000000..20235b9
--- /dev/null
@@ -0,0 +1,110 @@
+import sys
+import os
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+STACK_INFOS = BuiltIn().get_library_instance('stack_infos')
+
+
+def tc_002_pod_health_check():
+    steps = ['step1_check_componentstatus',
+             'step2_check_kubelet_is_running',
+             'step3_check_apiserver_is_running',
+             'step4_check_all_kubernetes_pod',
+             'step5_check_services_with_systemctl']
+    common_utils.keyword_runner(steps)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("health_check_2")
+def step1_check_componentstatus():
+    stdout = ex.execute_unix_command("kubectl get componentstatus -o json | jq .items[].conditions[].type")
+    logger.console('\n')
+    for line in stdout.split('\n'):
+        if "Healthy" in line:
+            logger.console(line)
+        else:
+            raise Exception(line)
+
+
+@robot_log
+def check_container_is_running(name, nodes):
+    for key in nodes:
+        stdout = ex.execute_unix_command_on_remote_as_user("docker ps --filter status=running --filter name=" + name +
+                                                           " | grep -v pause | grep " + name + " | wc -l ", nodes[key])
+        if stdout == '1':
+            logger.console("\n" + name + " container is running on node " + key + ".")
+        else:
+            stdout = ex.execute_unix_command_on_remote_as_user("docker ps | grep -v pause | grep " + name, nodes[key])
+            raise Exception(name + "container is NOT running on node " + key + "\n" + stdout)
+
+
+@robot_log
+def check_program_is_running(name, nodes):
+    for key in nodes:
+        stdout = ex.execute_unix_command_on_remote_as_user("ps -aux | grep '" + name + "' | grep -v 'color' | wc -l ",
+                                                           nodes[key])
+        if stdout == '1':
+            logger.console("\n" + name + " is running on node " + key + ".")
+        else:
+            stdout = ex.execute_unix_command_on_remote_as_user("ps -aux | grep '" + name + "' | grep -v 'color'",
+                                                               nodes[key])
+            raise Exception(name + " is NOT running on node " + key + "\n" + stdout)
+
+
+def step2_check_kubelet_is_running():
+    all_nodes = STACK_INFOS.get_all_nodes()
+    check_program_is_running("/kubelet ", all_nodes)
+    check_program_is_running("/kubelet_healthcheck.sh", all_nodes)
+
+
+def step3_check_apiserver_is_running():
+    crf_nodes = STACK_INFOS.get_crf_nodes()
+    check_container_is_running("kube-apiserver", crf_nodes)
+
+
+@pabot_lock("health_check_1")
+def step4_check_all_kubernetes_pod():
+    LOG_DIR = os.path.join(os.path.dirname(__file__))
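+    # Build a grep pattern that hides Running pods and every pod whitelisted in pods_skipped; anything left is failing.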
+    command = "kubectl get po -n kube-system | tail -n +2 | grep -vP 'Running"
+    for pod in pods_skipped:
+        command += '|'+pod
+    command += "'"
+    stdout = ex.execute_unix_command(command, fail_on_non_zero_rc=False, skip_prompt_in_command_output=True)[0]
+    if not stdout:
+        logger.console("\nAll kubernetes PODs are running.")
+        return
+    for line in stdout.split("\n"):
+        line = line.split()
+        command = "kubectl logs --namespace " + line[0] + " " + line[1]
+        filename = "tc004_step1_" + line[1] + ".log"
+        common_utils.gather_logs(command, filename, LOG_DIR)
+    raise Exception(stdout)
+
+
+def step5_check_services_with_systemctl():
+    all_nodes = STACK_INFOS.get_all_nodes()
+    command = "systemctl status | grep -E 'State: running|Jobs: 0 queued|Failed: 0 units' | grep -v grep"
+    for key in all_nodes:
+        logger.console(key)
+        stdout = "\nsystemctl status output:\n" + ex.execute_unix_command_on_remote_as_user(command, all_nodes[key])
+        if all(x in stdout for x in ["State: running", "Jobs: 0 queued", "Failed: 0 units"]):
+            logger.console(stdout)
+        else:
+            # cat is needed here to strip the systemctl coloring from the robot logs
+            failedservices = ex.execute_unix_command_on_remote_as_user("systemctl --failed | cat", all_nodes[key])
+            stdout = stdout + "\n" + failedservices
+            # TODO: cloud-final.service fails for an unknown reason
+            if any(service in failedservices for service in services_skipped):
+                logger.console(stdout)
+            else:
+                raise Exception(stdout)
diff --git a/testcases/basic_func_tests/tc_003_test_registry.py b/testcases/basic_func_tests/tc_003_test_registry.py
new file mode 100644 (file)
index 0000000..d0107cc
--- /dev/null
@@ -0,0 +1,31 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+crf_nodes = stack_infos.get_crf_nodes()
+all_nodes = stack_infos.get_all_nodes()
+temp_image_tag = 'test'
+
+
+def tc_003_test_registry():
+    steps = ['step_1_test_registry']
+    common_utils.keyword_runner(steps)
+
+
+def step_1_test_registry():
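+    # Resolve the local tag of the test image, then verify the registry is reachable from every node via pull and push.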
+    docker_img_tag_command = "docker images | grep {0} | awk '{{ print $2 }}' | head -n1".format(test_image)
+    image_tag = ex.execute_unix_command(docker_img_tag_command).strip()
+    image = reg + ':' + reg_port + '/' + reg_path + '/' + test_image + ':' + image_tag
+    command = 'docker rmi ' + image + '; docker pull ' + image + '; docker push ' + image
+    logger.console("")
+    for key in all_nodes:
+        ex.execute_unix_command_on_remote_as_root(command, all_nodes[key], delay="30s")
+        logger.console(key + ": registry reachable.")
diff --git a/testcases/basic_func_tests/tc_004_ssh_file_check.py b/testcases/basic_func_tests/tc_004_ssh_file_check.py
new file mode 100644 (file)
index 0000000..57d1871
--- /dev/null
@@ -0,0 +1,36 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from test_constants import *  # noqa
+
+ex = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+
+
+def tc_004_ssh_file_check():
+    steps = ['step1_openstack_file_check_on_crf_nodes']
+    common_utils.keyword_runner(steps)
+
+
+def step1_openstack_file_check_on_crf_nodes():
+    check_file(stack_infos.get_crf_nodes(), '/etc/userconfig/', crf_node_openstack_file_types)
+
+
+def check_file(nodes, folder, files):
+    if not nodes:
+        logger.info("Nodes dictionary is empty, nothing to check.")
+        return
+    for key in nodes:
+        logger.console("\n" + key + " " + nodes[key])
+        for f in files:
+            full_file_path = folder + f
+            command = 'ls ' + full_file_path + ' | wc -l'
+            stdout = ex.execute_unix_command_on_remote_as_user(command, nodes[key])
+            if stdout == "1":
+                logger.console(full_file_path + " exists.")
+            else:
+                raise Exception(full_file_path + " does not exist!")
diff --git a/testcases/basic_func_tests/tc_005_ssh_dns_server_check.py b/testcases/basic_func_tests/tc_005_ssh_dns_server_check.py
new file mode 100644 (file)
index 0000000..1d3b7d9
--- /dev/null
@@ -0,0 +1,60 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from test_constants import *  # noqa
+
+ex = BuiltIn().get_library_instance('execute_command')
+crf_nodes = BuiltIn().get_library_instance('stack_infos').get_crf_nodes()
+
+
+def tc_005_ssh_dns_server_check():
+    steps = ['step1_check_dns_server_replica_num_within_limits',
+             'step2_dns_server_port_check',
+             'step3_check_address_resolution']
+    common_utils.keyword_runner(steps)
+
+
+def step1_check_dns_server_replica_num_within_limits():
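+    # Column 5 of 'kubectl get daemonset' output is the AVAILABLE count (column layout depends on the kubectl version).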
+    command = "kubectl get daemonset kube-dns --namespace=kube-system | grep kube-dns | awk {'print $5'}"
+    available_dns_replica_num = int(ex.execute_unix_command(command))
+    if available_dns_replica_num < min_dns_replica:
+        raise Exception("{0} DNS servers are running! Minimum should be {1}.".format(available_dns_replica_num,
+                                                                                     min_dns_replica))
+    if available_dns_replica_num > max_dns_replica:
+        raise Exception("{0} DNS servers are running! Maximum should be {1}.".format(available_dns_replica_num,
+                                                                                     max_dns_replica))
+
+
+def step2_dns_server_port_check():
+    nodes = get_nodes_containing_dns_servers()
+    check_program_listening_on_given_port_protocol_on_remote(nodes, 'dnsmasq', 'tcp', dns_masq_port)
+    check_program_listening_on_given_port_protocol_on_remote(nodes, 'kube-dns', 'tcp6', kube_dns_port)
+
+
+def step3_check_address_resolution():
+    ex.execute_unix_command("getent hosts " + test_address1)
+    ex.execute_unix_command("getent hosts " + test_address2)
+    logger.console("Addresses are resolved successfully")
+
+
+def get_nodes_containing_dns_servers():
+    dns_nodes = {}
+    logger.console("")
+    for name, ip in crf_nodes.items():
+        command = 'docker ps | grep dnsmasq | wc -l'
+        stdout = int(ex.execute_unix_command_on_remote_as_user(command, ip))
+        if stdout == 1:
+            logger.console('DNS server running on ' + name + ':' + ip)
+            dns_nodes[name] = ip
+        if stdout > 1:
+            raise Exception('Instead of one, ' + str(stdout) + ' DNS server running on node: ' + name + '!')
+    return dns_nodes
+
+
+def check_program_listening_on_given_port_protocol_on_remote(nodes, pname, proto, port):
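+    # The shell concatenates the unquoted pname to the quoted pattern, so grep
+    # effectively matches "<proto> .*:<port>.*LISTEN.*<pname>" in the netstat output.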
+    command = 'netstat -lopna | grep --color=no -P "' + proto + ' .*:' + port + '.*LISTEN.*"' + pname
+    for name, ip in nodes.items():
+        stdout = ex.execute_unix_command_on_remote_as_root(command, ip)
+        logger.console(name + ':' + stdout)
diff --git a/testcases/basic_func_tests/tc_006_ssh_test_ext_ntp.py b/testcases/basic_func_tests/tc_006_ssh_test_ext_ntp.py
new file mode 100644 (file)
index 0000000..d009e85
--- /dev/null
@@ -0,0 +1,73 @@
+import sys
+import os
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+ex = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+
+
+def tc_006_ssh_test_ext_ntp():
+    steps = ['step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes']
+    common_utils.keyword_runner(steps)
+
+
+def step1_check_ntpd_service_and_ext_ntp_ip_on_crf_nodes():
+    crf_nodes = stack_infos.get_crf_nodes()
+    check_ntpd_status(crf_nodes)
+    check_if_nokia_ntp_server_address_set_on_crf_node(crf_nodes)
+
+
+@robot_log
+def check_ntpd_status(nodes):
+    if not nodes:
+        logger.info("Nodes dictionary is empty, nothing to check.")
+        return
+    command = 'systemctl status ntpd.service | grep --color=no "Active"'
+    for node in nodes:
+        logger.console("\nCheck ntpd status " + node + " " + nodes[node])
+        stdout = ex.execute_unix_command_on_remote_as_user(command, nodes[node])
+        if "running" not in stdout:
+            raise Exception("ntpd.service is not running!")
+
+
+@robot_log
+def get_ext_ntp_ips_from_node():
+    return stack_infos.get_inventory()["all"]["vars"]["time"]["ntp_servers"]
+
+
+@robot_log
+def filter_valid_ntp_servers(ntp_servers):
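+    # Probe each configured server with a query-only ntpdate call and keep the reachable ones.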
+    valid_servers = []
+    for server in ntp_servers:
+        stdout = ex.execute_unix_command("ntpdate -q {}".format(server), fail_on_non_zero_rc=False)
+        if "no server suitable for synchronization found" not in stdout:
+            valid_servers.append(server)
+    return valid_servers
+
+
+@robot_log
+def is_ntp_server_set_on_node(server_ip, node):
+    command = 'ntpq -pn | grep -w --color=no ' + server_ip
+    stdout = ex.execute_unix_command_on_remote_as_user(command, node, {}, fail_on_non_zero_rc=False)
+    return server_ip in str(stdout)
+
+
+@robot_log
+def check_if_nokia_ntp_server_address_set_on_crf_node(nodes):
+    ext_ntp_server_ips = get_ext_ntp_ips_from_node()
+    valid_servers = filter_valid_ntp_servers(ext_ntp_server_ips)
+    logger.info("The following ntp_servers will be tested:")
+    logger.info(valid_servers)
+    is_ip_set = True
+    for node in nodes:
+        for ntp_serv_ip in valid_servers:
+            if not is_ntp_server_set_on_node(ntp_serv_ip, nodes[node]):  # pass the node IP, not its name
+                is_ip_set = False
+    if not is_ip_set:
+        raise Exception("Wrong or no NTP server address set!")
diff --git a/testcases/basic_func_tests/tc_007_ssh_test_overlay_quota.py b/testcases/basic_func_tests/tc_007_ssh_test_overlay_quota.py
new file mode 100644 (file)
index 0000000..87b7855
--- /dev/null
@@ -0,0 +1,79 @@
+import sys
+import os
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+ex = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+
+
+def tc_007_ssh_test_overlay_quota():
+    steps = ['step1_check_storage_driver_and_quota_setting',
+             'step2_check_copy_files']
+    common_utils.keyword_runner(steps)
+
+
+def step1_check_storage_driver_and_quota_setting():
+    logger.info("step1: check size of all container")
+    command = r"ps -eaf | grep --color=no dockerd | grep --color=no " \
+              r"'\-\-storage-driver overlay2 \-\-storage-opt overlay2.size='" + docker_size_quota
+    nodes = stack_infos.get_all_nodes()
+    for node in nodes:
+        logger.info("\nchecking docker daemon settings on " + node + " : " + nodes[node])
+        ex.execute_unix_command_on_remote_as_root(command, nodes[node])
+
+
+def get_containerid_of_flannel_from_node(nodeIp):
+    return ex.execute_unix_command_on_remote_as_root("docker ps | grep flanneld | cut -d ' ' -f1", nodeIp)
+
+
+def allocate_file_on_node(nodeIp, size, test_file):
+    command = "fallocate -l " + size + " /var/lib/docker/" + test_file
+    return ex.execute_unix_command_on_remote_as_root(command, nodeIp)
+
+
+def copy_file_to_container(nodeIp, containerId, actual_file):
+    command = "docker cp " + actual_file + " " + containerId + ":/"
+    return ex.execute_unix_command_on_remote_as_root(command, nodeIp, delay="120s")
+
+
+def delete_files_from_container(nodeIp, containerId, listOfFiles):
+    command = "docker exec -ti " + containerId + " rm -f /"
+    for file in listOfFiles:
+        ex.execute_unix_command_on_remote_as_root(command + file, nodeIp)
+
+
+def delete_files_from_node(nodeIp, listOfFiles):
+    command = "rm -f "
+    for f in listOfFiles:
+        ex.execute_unix_command_on_remote_as_root(command + '/var/lib/docker/' + f, nodeIp)
+
+
+def test_copy_file(nodeIp, fileSize, fileName, containerId):
+    allocate_file_on_node(nodeIp, fileSize, fileName)
+    copy_file_to_container(nodeIp, containerId, "/var/lib/docker/" + fileName)
+
+
+def step2_check_copy_files():
+    crfNodes = stack_infos.get_crf_nodes()
+    if not crfNodes:
+        raise Exception("controller-1 internal ip address is not available!")
+    nodeIp = list(crfNodes.values())[0]  # list() keeps this working on both Python 2 and 3
+    containerId = get_containerid_of_flannel_from_node(nodeIp)
+    listOfFiles = ["tmp_file", "tiny_file"]
+    file_size = str(int(docker_size_quota[:-1]) * 1024 - 5) + 'M'
+    logger.info("step2: copy a smaller file than overlay quota to flannel.")
+    test_copy_file(nodeIp, file_size, listOfFiles[0], containerId)
+    logger.info("step2: copy 10 Mbytes file to flannel container. It should fail!")
+    try:
+        test_copy_file(nodeIp, "10M", listOfFiles[1], containerId)
+    except Exception as e:
+        if "no space left on device" not in str(e):
+            raise e
+        logger.info("file can't be copied to container as expected")
+    delete_files_from_container(nodeIp, containerId, listOfFiles)
+    delete_files_from_node(nodeIp, listOfFiles)
diff --git a/testcases/basic_func_tests/tc_008_storage_check.py b/testcases/basic_func_tests/tc_008_storage_check.py
new file mode 100644 (file)
index 0000000..90a79c8
--- /dev/null
@@ -0,0 +1,94 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+BuiltIn().import_library('pabot.PabotLib')
+pabot = BuiltIn().get_library_instance('pabot.PabotLib')
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+pv_name = ""
+
+
+def tc_008_storage_check():
+    steps = ['step1_read_write_pv',
+             'step2_check_pv_retaining',
+             'step3_read_write_pv']
+    BuiltIn().run_keyword("tc_008_storage_check.Setup")
+    common_utils.keyword_runner(steps)
+
+
+def Setup():
+    pabot.acquire_lock("pv_test_ip")
+    install_charts()
+
+
+def step1_read_write_pv():
+    read_write_pv("step1.log")
+
+
+@pabot_lock("health_check_2")
+def step2_check_pv_retaining():
+    common_utils.helm_delete("storage-test")
+    common_utils.check_kubernetes_object(kube_object=pv_test_pod,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=90)
+    _install_storage_test_helm_chart()
+    pabot.release_lock("pv_test_ip")
+
+
+def step3_read_write_pv():
+    read_write_pv("step3.log")
+
+
+def read_write_pv(file_name):
+    pod_list = execute.execute_unix_command("kubectl get pod | grep pv-test-deployment | grep -i running | "
+                                            "awk '{print $1}'")
+
+    # write log on persistent storage from pods
+    for pod in pod_list.split("\n"):
+        pod = pod.strip()
+        logger.info("POD NAME: " + pod)
+        execute.execute_unix_command(
+            "kubectl exec " + pod + " -- sh -c 'echo test_log_" + pod + " >> /usr/share/storage_test/" + file_name +
+            "'")
+
+    # check if logs can be reached from containers
+    for pod in pod_list.split("\n"):
+        pod = pod.strip()
+        log = execute.execute_unix_command(
+            "kubectl exec " + pod + " -- sh -c 'cat /usr/share/storage_test/" + file_name + "'")
+        for pod_log in pod_list.split("\n"):
+            pod_log = pod_log.strip()
+            if pod_log not in log:
+                raise Exception("Log entry: test_log_" + pod_log + " is not found in log file")
+
+
+@pabot_lock("health_check_2")
+def install_charts():
+    common_utils.helm_install(chart_name="default/persistentvolume-claim", release_name="pvc")
+    common_utils.wait_if_pressure()
+    common_utils.check_kubernetes_object(kube_object=pv_test_pvc,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Bound", timeout=90)
+    _install_storage_test_helm_chart()
+
+    global pv_name  # pylint: disable=global-statement
+    pv_name = execute.execute_unix_command("kubectl get pvc | grep pvc- | awk {'print$3'}")
+
+
+def _install_storage_test_helm_chart():
+    if stack_infos.get_worker_nodes():
+        common_utils.helm_install(chart_name="default/storage-test-worker", release_name="storage-test")
+    else:
+        common_utils.helm_install(chart_name="default/storage-test-oam", release_name="storage-test")
+    common_utils.wait_if_pressure()
+    common_utils.check_kubernetes_object(kube_object=pv_test_pod,
+                                         tester_function=common_utils.test_kubernetes_object_available,
+                                         additional_filter="Running", timeout=60)
diff --git a/testcases/cpu_pooling/tc_001_cpu_pool_validation_tests.py b/testcases/cpu_pooling/tc_001_cpu_pool_validation_tests.py
new file mode 100644 (file)
index 0000000..6d9b430
--- /dev/null
@@ -0,0 +1,358 @@
+import sys
+import os
+import re
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+cpupools = {}
+
+
+def tc_001_cpu_pool_validation_tests():
+    steps = [
+        'step1_check_default_pool_cpu_node_capacity',
+        'step2_exclusive_and_shared',
+        'step3_annotation_without_requests',
+        'step4_annotation_without_container',
+        'step5_annotation_without_cpus',
+        'step6_request_for_default_pool',
+        'step7_pod_use_default_pool_guaranteed',
+        'step8_pod_use_default_pool_burstable',
+        'step9_1_exclusive_1_shared',
+        'step10_cpu_allowed_list_set_after_test_pod_deployed'
+    ]
+    BuiltIn().run_keyword("tc_001_cpu_pool_validation_tests.Setup")
+    common_utils.keyword_runner(steps)
+
+
+@pabot_lock("flannel_ip")
+def Setup():
+    global cpupools, nodename
+    nodename = common_utils.decide_nodename()
+    cpupools = common_utils.get_cpupools()
+    logger.info("CPU pools: " + str(cpupools))
+    logger.info("Default nodename to deploy: " + nodename)
+
+
+# set lock to not run with HPA_checks tests
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step1_check_default_pool_cpu_node_capacity():
+    node_cpu_capacity = get_node_cpu_capacity(nodename)
+    cpu_request = "{0}m".format(node_cpu_capacity)
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
+                                  .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+        logger.info("Default pool allocation successfull with maximum allocatable cpus!")
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+        cpu_request = "{0}m".format(node_cpu_capacity + 10)
+        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
+                                  .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=90,
+                                                    delay=3)
+        logger.info("Default pool allocation failed with more cpu than allocatable as expected!")
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step2_exclusive_and_shared():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-mix2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url}".format(reg_url=reg))
+
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod6,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod6['obj_name'])
+        requested_cpupool = cpupools[nodename]['exclusive_caas'] + cpupools[nodename]['shared_caas']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, requested_cpupool):
+            raise Exception('{pod} did not allocate CPUs from the {req_pool} pool!'
+                            .format(pod=cpu_pooling_pod6['obj_name'], req_pool=requested_cpupool))
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod6,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step3_annotation_without_requests():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-annotation1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url}".format(reg_url=reg))
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod9,
+                                             tester_function=common_utils.test_kubernetes_object_available,
+                                             timeout=30,
+                                             delay=3)
+
+        result = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod9['obj_name']))
+
+        error = 'Container cpu-pooling has no pool requests in pod spec'
+
+        if error not in result:
+            raise Exception('Replicaset description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod9,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step4_annotation_without_container():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-annotation2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url}".format(reg_url=reg))
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod10,
+                                             tester_function=common_utils.test_kubernetes_object_available,
+                                             timeout=30,
+                                             delay=3)
+
+        result = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod10['obj_name']))
+
+        error = "'container' is mandatory in annotation"
+
+        if error not in result:
+            raise Exception('Replicaset description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod10,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step5_annotation_without_cpus():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-annotation3", release_name="cpu-pooling",
+                                  values="registry_url={reg_url}".format(reg_url=reg))
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod11,
+                                             tester_function=common_utils.test_kubernetes_object_available,
+                                             timeout=30,
+                                             delay=3)
+
+        result = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod11['obj_name']))
+
+        error = "'cpus' field is mandatory in annotation"
+
+        if error not in result:
+            raise Exception('Replicaset description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod11,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("health_check_1")
+@pabot_lock("flannel_ip")
+def step6_request_for_default_pool():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-default2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url}".format(reg_url=reg))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod8,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=30,
+                                                    delay=3)
+        error = "Insufficient nokia.k8s.io/default"
+        result = ex.execute_unix_command('kubectl describe pod {podname}'.format(podname=cpu_pooling_pod8['obj_name']))
+
+        if error not in result:
+            raise Exception('Pod description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod8,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("flannel_ip")
+def step7_pod_use_default_pool_guaranteed():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
+                                                                                              node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod7['obj_name'])
+        default_pool = cpupools[nodename]['default']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, default_pool):
+            raise Exception('{pod} did not allocate CPU from the default pool!'.format(pod=cpu_pooling_pod7['obj_name']))
+        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Guaranteed")
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("flannel_ip")
+def step8_pod_use_default_pool_burstable():
+    memory_request = "500Mi"
+    cpu_request = "250m"
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name},mem_request={mem},"
+                                         "cpu_request={cpu}".format(reg_url=reg, node_name=nodename, mem=memory_request,
+                                                                    cpu=cpu_request))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod7['obj_name'])
+        default_pool = cpupools[nodename]['default']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, default_pool):
+            raise Exception('{pod} did not allocate CPU from the default pool!'.format(pod=cpu_pooling_pod7['obj_name']))
+        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Burstable")
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@pabot_lock("flannel_ip")
+def step9_1_exclusive_1_shared():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-mix1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
+                                                                                              node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod5,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod5,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("cpu_pooling")
+@pabot_lock("flannel_ip")
+def step10_cpu_allowed_list_set_after_test_pod_deployed():
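+    # Verify that redeploying the cpu-setter DaemonSet (re)applies the CPU affinity of an already running exclusive pod.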
+    cpu_setter_deleted = False
+    try:
+        cpu_pooling_setter["obj_count"] = ex.execute_unix_command("kubectl get pod --all-namespaces | "
+                                                                  "grep setter | wc -l")
+        ex.execute_unix_command("kubectl get ds -n kube-system cpu-setter -o yaml")
+        ex.execute_unix_command("kubectl get ds -n kube-system cpu-setter -o yaml > setter.yaml")
+        ex.execute_unix_command("kubectl delete ds -n kube-system cpu-setter")
+
+        cpu_setter_deleted = True
+
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_setter,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1", release_name="cpu-pooling",
+                                  values="registry_url=" + reg + ",nodename=" + nodename)
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        allowed_cpus_for_pod_before = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod1['obj_name'])
+
+        ex.execute_unix_command("kubectl create -f setter.yaml")
+
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_setter,
+                                                    expected_result=cpu_pooling_setter["obj_count"],
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+        cpu_setter_deleted = False
+        allowed_cpus_for_pod_after = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod1['obj_name'])
+        exclusive_cpus = cpupools[nodename]['exclusive_caas']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpus_for_pod_after, exclusive_cpus):
+            raise Exception('{pod} did not allocate CPU from the exclusive pool!'.format(pod=cpu_pooling_pod1['obj_name']))
+        if set(allowed_cpus_for_pod_before) == set(allowed_cpus_for_pod_after):
+            raise Exception('Allocated CPUs before setter deployed is equal with CPU set after deploy!')
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod1,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+        setter_count = ex.execute_unix_command("kubectl get pod --all-namespaces | grep setter | wc -l")
+        if cpu_setter_deleted:
+            if setter_count != "0":
+                search_cmd = "kubectl get pod -n kube-system |grep setter | awk '{print $1}'"
+                del_cmd = "kubectl -n kube-system delete pod --grace-period=0 --force --wait=false"
+
+                ex.execute_unix_command("for i in `{search}`; do {delete} $i; done".format(search=search_cmd,
+                                                                                           delete=del_cmd))
+                common_utils.check_kubernetes_object(kube_object=cpu_pooling_setter,
+                                                     tester_function=common_utils.test_kubernetes_object_not_available,
+                                                     timeout=90)
+            ex.execute_unix_command("kubectl create -f setter.yaml")
+
+            common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_setter,
+                                                        expected_result=cpu_pooling_setter["obj_count"],
+                                                        filter=r'(Running)\s*[0]',
+                                                        timeout=90)
+
+
+@robot_log
+def check_qos_of_pod(podname, qos_type):
+    command = "kubectl describe pod " \
+              "`kubectl get pod | grep {0} | awk '{{print $1}}'` | grep 'QoS Class:'".format(podname)
+    result = ex.execute_unix_command(command)
+    if qos_type not in result:
+        raise Exception("{pod} QoS should be {qos}, instead of {result}!".format(pod=podname, qos=qos_type,
+                                                                                 result=result))
+
+
+@robot_log
+def get_node_cpu_capacity(node_name):
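+    # Parse the node's allocatable CPU (in millicores) and subtract the CPU already requested on it.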
+    command = "kubectl describe node `kubectl get no -L=nodename | grep {nodename} | awk '{{print $1}}'`"\
+        .format(nodename=node_name)
+    result = ex.execute_unix_command(command)
+    matched = re.search(r'Allocatable:(.|\n)*cpu:\s+(\d+)', result)
+    if matched:
+        max_cap = int(matched.group(2)) * 1000
+        matched = re.search(r'cpu\s+(\d+)m', result)
+        if matched:
+            return max_cap - int(matched.group(1))
+    raise Exception('Failed getting node CPU capacity!')
diff --git a/testcases/cpu_pooling/tc_002_exclusive_pool_tests.py b/testcases/cpu_pooling/tc_002_exclusive_pool_tests.py
new file mode 100644 (file)
index 0000000..9221b7a
--- /dev/null
@@ -0,0 +1,252 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.libraries.String import String
+from robot.api import logger
+from time import sleep
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+import common_utils  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+cpupools = {}
+
+
+def tc_002_exclusive_pool_tests():
+    steps = [
+        'step1_no_annotation',
+        'step2_with_annotation',
+        'step3_more_replicas_than_cpus',
+        'step4_request_more_than_cpus',
+        'step5_less_cpu_annotation_than_request',
+        'step6_more_cpu_annotation_than_request',
+        'step_7_allocate_all_exclusive_and_new_one_start_running_after_needed_resource_is_freed_up'
+    ]
+
+    BuiltIn().run_keyword("tc_002_exclusive_pool_tests.Setup")
+    common_utils.keyword_runner(steps)
+
+
+def Setup():
+    global cpupools
+    global nodename
+    nodename = common_utils.decide_nodename()
+    cpupools = common_utils.get_cpupools()
+    logger.info("CPU pools: " + str(cpupools))
+    logger.info("Default nodename to deploy: " + nodename)
+
+
+@pabot_lock("flannel_ip")
+def step1_no_annotation():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
+                                                                                              node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod1['obj_name'])
+        exclusive_cpus = cpupools[nodename]['exclusive_caas']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, exclusive_cpus):
+            raise Exception('{pod} did not allocate CPU from the exclusive pool!'.format(pod=cpu_pooling_pod1['obj_name']))
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod1,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("flannel_ip")
+def step2_with_annotation():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
+                                                                                              node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        allowed_cpu_for_pod = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod2['obj_name'])
+        exclusive_cpus = cpupools[nodename]['exclusive_caas']
+        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpu_for_pod, exclusive_cpus):
+            raise Exception('{pod} did not allocate CPU from the exclusive pool!'.format(pod=cpu_pooling_pod2['obj_name']))
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("flannel_ip")
+def step3_more_replicas_than_cpus():
+    num_of_replicas = len(cpupools[nodename]['exclusive_caas'])
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name},replicas={cpus}"
+                                  .format(reg_url=reg, cpus=num_of_replicas+1, node_name=nodename))
+        cpu_pooling_pod2['obj_count'] = str(num_of_replicas)
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=90,
+                                                    delay=3)
+        result = ex.execute_unix_command('kubectl describe pod {podname}'.format(podname=cpu_pooling_pod2['obj_name']))
+        error = 'Insufficient nokia.k8s.io/exclusive_caas'
+
+        if error not in result:
+            raise Exception('Pod description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        cpu_pooling_pod2['obj_count'] = "1"
+
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("flannel_ip")
+def step4_request_more_than_cpus():
+    max_exclusive_pool_size = len(cpupools[nodename]['exclusive_caas'])
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},nodename={node_name},proc_req={cpus},pool_req={cpus}"
+                                  .format(reg_url=reg, cpus=max_exclusive_pool_size+1, node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=90,
+                                                    delay=3)
+        result = ex.execute_unix_command('kubectl describe pod {podname}'.format(podname=cpu_pooling_pod2['obj_name']))
+        error = 'Insufficient nokia.k8s.io/exclusive_caas'
+
+        if error not in result:
+            raise Exception('Pod description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@pabot_lock("flannel_ip")
+def step5_less_cpu_annotation_than_request():
+    annotation_cpu = 1
+    request_cpu = 2
+    cpu_pooling_pod2['obj_type'] = 'replicaset'
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
+                                  values="registry_url={url},nodename={node_name},proc_req={proc},pool_req={req}"
+                                  .format(url=reg, proc=annotation_cpu, req=request_cpu, node_name=nodename))
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_available,
+                                             timeout=10,
+                                             delay=3)
+        result = ex.execute_unix_command('kubectl describe replicaset {0}'.format(cpu_pooling_pod2['obj_name']))
+        error = 'Exclusive CPU requests {req} do not match to annotation {proc}'.format(req=request_cpu,
+                                                                                        proc=annotation_cpu)
+
+        if error not in result:
+            raise Exception('Replicaset description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+        cpu_pooling_pod2['obj_type'] = 'pod'
+
+
+@pabot_lock("flannel_ip")
+def step6_more_cpu_annotation_than_request():
+    annotation_cpu = 2
+    request_cpu = 1
+    cpu_pooling_pod2['obj_type'] = 'replicaset'
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling",
+                                  values="registry_url={url},nodename={node_name},proc_req={proc},pool_req={req}"
+                                  .format(url=reg, proc=annotation_cpu, req=request_cpu, node_name=nodename))
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_available,
+                                             timeout=10,
+                                             delay=3)
+        result = ex.execute_unix_command('kubectl describe replicaset {0}'.format(cpu_pooling_pod2['obj_name']))
+        error = 'Exclusive CPU requests {req} do not match to annotation {proc}'.format(req=request_cpu,
+                                                                                        proc=annotation_cpu)
+
+        if error not in result:
+            raise Exception('Replicaset description does not contain expected error! -' + result)
+        else:
+            logger.info(error)
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+        cpu_pooling_pod2['obj_type'] = 'pod'
+
+
+@pabot_lock("flannel_ip")
+def step_7_allocate_all_exclusive_and_new_one_start_running_after_needed_resource_is_freed_up():
+    max_exclusive_pool_size = len(cpupools[nodename]['exclusive_caas'])
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive2", release_name="cpu-pooling1",
+                                  values="registry_url={reg_url},nodename={node_name},proc_req={cpus},pool_req={cpus}"
+                                  .format(reg_url=reg, cpus=max_exclusive_pool_size, node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod2,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+        logger.info("Allocation of all exclusive CPU successfull!")
+
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1", release_name="cpu-pooling2",
+                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
+                                                                                              node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=90,
+                                                    delay=3)
+        logger.info("Try to allocate more exclusive CPU -> Pod in Pending!")
+        common_utils.helm_delete("cpu-pooling1")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+    finally:
+        if common_utils.helm_list("cpu-pooling1") != "0":
+            common_utils.helm_delete("cpu-pooling1")
+        common_utils.helm_delete("cpu-pooling2")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod1,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod2,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=90)
+
+
+@robot_log
+def get_cpu_core_of_process(pod_name, command):
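+    # Field 39 of /proc/<pid>/stat is the CPU core the process last executed on,
+    # so reading it from inside the pod reveals where the pinned process runs.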
+    bash_command = "ps | grep '{proc_name}' | grep -v grep | awk '{{print $1}}'".format(proc_name=command)
+    proc_id = ex.execute_unix_command("kubectl exec `kubectl get pod | grep {0} | "
+                                      "awk '{{print $1}}'` -- {1}".format(pod_name, bash_command))
+    bash_command = "cat /proc/{0}/stat | awk '{{print $39}}'".format(proc_id)
+    result = ex.execute_unix_command("kubectl exec `kubectl get pod | grep {0} | "
+                                     "awk '{{print $1}}'` -- {1}".format(pod_name, bash_command))
+    return int(result)
diff --git a/testcases/cpu_pooling/tc_003_exclusive_pool_tests_more_cpu.py b/testcases/cpu_pooling/tc_003_exclusive_pool_tests_more_cpu.py
new file mode 100644 (file)
index 0000000..f48f7e4
--- /dev/null
@@ -0,0 +1,76 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+cpupools = {}
+
+
+def tc_003_exclusive_pool_tests_more_cpu():
+    steps = ['step1_with_two_process']
+    BuiltIn().run_keyword("tc_003_exclusive_pool_tests_more_cpu.Setup")
+    common_utils.keyword_runner(steps)
+
+
+def Setup():
+    global cpupools, nodename
+    nodename = common_utils.decide_nodename()
+    cpupools = common_utils.get_cpupools()
+    logger.info("CPU pools: " + str(cpupools))
+    logger.info("Default nodename to deploy: " + nodename)
+
+
+def step1_with_two_process():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive3", release_name="cpu-pooling",
+                                  values="registry_url=" + reg + ",nodename=" + nodename)
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod3,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=10)
+
+        exclusive_cpus = cpupools[nodename]['exclusive_caas']
+
+        proc1_cpu, proc2_cpu = get_cpu_core_of_processes(cpu_pooling_pod3['obj_name'], "dumb-init -c sleep 1000")
+        if proc1_cpu not in exclusive_cpus:
+            raise Exception('{pod}: Proc1 is running on non-exclusive cpu core {cpu}!'
+                            .format(pod=cpu_pooling_pod3['obj_name'], cpu=proc1_cpu))
+        if proc2_cpu not in exclusive_cpus:
+            raise Exception('{pod}: Proc2 is running on non-exclusive cpu core {cpu}!'
+                            .format(pod=cpu_pooling_pod3['obj_name'], cpu=proc2_cpu))
+        if proc1_cpu == proc2_cpu:
+            raise Exception('{pod}: The two processes use the same cpu core: {cpu}!'
+                            .format(pod=cpu_pooling_pod3['obj_name'], cpu=proc2_cpu))
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod3,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@robot_log
+def get_cpu_core_of_processes(pod_name, command):
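+    # Resolves the CPU core of every process matching 'command' inside the pod.
+    # Returns a single int for one match, a list for several and "" for none.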
+    cpu_list = []
+    exact_pod_name = ex.execute_unix_command("kubectl get pod | grep {0} | awk '{{print $1}}'".format(pod_name))
+    bash_command = "ps | grep '{proc_name}' | grep -v grep | awk '{{print $1}}'".format(proc_name=command)
+    proc_ids = ex.execute_unix_command("kubectl exec {0} -- {1}".format(exact_pod_name, bash_command))
+    logger.info("PROC_IDS:" + proc_ids)
+    for pid in proc_ids.splitlines():
+        bash_command = "cat /proc/{0}/stat | awk '{{print $39}}'".format(pid)
+        exec_command = "kubectl exec {0} -- {1}".format(exact_pod_name, bash_command)
+        result = ex.execute_unix_command(exec_command)
+        logger.info("CPU for pid " + pid + " is: " + result)
+        cpu_list.append(int(result))
+    if len(cpu_list) == 1:
+        return cpu_list[0]
+    elif not cpu_list:
+        return ""
+    return cpu_list
diff --git a/testcases/cpu_pooling/tc_004_shared_cpu_pool_tests.py b/testcases/cpu_pooling/tc_004_shared_cpu_pool_tests.py
new file mode 100644 (file)
index 0000000..cd4e14c
--- /dev/null
@@ -0,0 +1,117 @@
+import sys
+import os
+import time
+import yaml
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+from datetime import datetime
+from datetime import timedelta
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+cpupools = {}
+max_shared_pool_size = 0
+
+
+def tc_004_shared_cpu_pool_tests():
+    steps = [
+        'step1_shared_passed',
+        'step2_shared_fail'
+    ]
+
+    BuiltIn().run_keyword("tc_004_shared_cpu_pool_tests.Setup")
+    common_utils.keyword_runner(steps)
+
+
+def Setup():
+    global cpupools, max_shared_pool_size, nodename
+    nodename = common_utils.decide_nodename()
+    cpupools = common_utils.get_cpupools()
+    logger.info("CPU pools: " + str(cpupools))
+    max_shared_pool_size = get_max_shared_cpus_len()
+
+
+def step1_shared_passed():
+    cpu_request = 500
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-shared1", release_name="cpu-pooling",
+                                  values="registry_url={url},pool_req={cpu_req},"
+                                         "nodename={node_name}".format(url=reg, cpu_req=cpu_request,
+                                                                       node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod4,
+                                                    expected_result="1",
+                                                    filter=r'(Running)\s*[0]',
+                                                    timeout=90)
+
+        test_pod_cpu_usage(cpu_pooling_pod4['obj_name'], 90, cpu_request)
+        check_cpu_resources(cpu_pooling_pod4['obj_name'])
+
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod4,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+def step2_shared_fail():
+    try:
+        common_utils.helm_install(chart_name="default/cpu-pooling-shared1", release_name="cpu-pooling",
+                                  values="registry_url={reg_url},pool_req={cpus},nodename={node_name}"
+                                  .format(reg_url=reg, cpus=(max_shared_pool_size*1000)+100, node_name=nodename))
+        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod4,
+                                                    expected_result="1",
+                                                    filter=r'(Pending)\s*[0]',
+                                                    timeout=90,
+                                                    delay=3)
+        ex.execute_unix_command('kubectl describe pod {podname} | grep "{check_str}"'
+                                .format(podname=cpu_pooling_pod4['obj_name'],
+                                        check_str='Insufficient nokia.k8s.io/shared_caas'))
+    finally:
+        common_utils.helm_delete("cpu-pooling")
+        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod4,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=60)
+
+
+@robot_log
+def test_pod_cpu_usage(pod_name, timeout, threshold):
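+    # 'kubectl top pod' only returns data once the metrics pipeline has scraped the
+    # pod, so poll until a non-zero value appears (or the timeout hits), then check
+    # that the measured millicore usage is within +/-10m of the requested budget.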
+    command = "kubectl top pod `kubectl get pod | grep {name} | awk '{{print $1}}'`".format(name=pod_name)
+    result, ec = ex.execute_unix_command(command, fail_on_non_zero_rc=False)
+    logger.info(ec + " - " + result)
+    wait_until = datetime.now() + timedelta(seconds=timeout)
+    while (ec != "0" or "0m" in result) and (datetime.now() < wait_until):
+        result, ec = ex.execute_unix_command(command, fail_on_non_zero_rc=False)
+        logger.info(ec + " - " + result)
+        time.sleep(1)
+    if ec != "0":
+        raise Exception("test_pod_cpu_usage failed: " + result)
+    else:
+        result = result.splitlines()[1].split()[1]
+    if int(result[:-1]) < threshold - 10 or int(result[:-1]) > threshold + 10:
+        raise Exception("CPU usage: {0} - request: {1}m".format(result, threshold))
+
+
+def get_max_shared_cpus_len():
+    maxlen = 0
+    for node in cpupools:
+        if 'shared_caas' in cpupools[node].keys() and len(cpupools[node]['shared_caas']) > maxlen:
+            maxlen = len(cpupools[node]['shared_caas'])
+    return maxlen
+
+
+@robot_log
+def check_cpu_resources(pod_name):
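+    # The shared-pool webhook is expected to zero the native cpu request and mirror
+    # the cpu limit into nokia.k8s.io/shared_caas; verify both on the live pod spec.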
+    command = "kubectl get pod `kubectl get pod | grep {name} | awk '{{print $1}}'` -o yaml".format(name=pod_name)
+    result = ex.execute_unix_command(command)
+    result_dict = yaml.safe_load(result)
+    resources = result_dict['spec']['containers'][0]['resources']
+    if resources['requests']['cpu'] != '0':
+        raise Exception("CPU request should be 0! CPU request: " + resources['requests']['cpu'])
+    if resources['limits']['cpu'][:-1] != resources['limits']['nokia.k8s.io/shared_caas']:
+        raise Exception("CPU limit should be equal to nokia.k8s.io/shared_caas! " + resources['requests']['cpu'])
diff --git a/testcases/danm_network_check/danm_utils.py b/testcases/danm_network_check/danm_utils.py
new file mode 100644 (file)
index 0000000..2cf801a
--- /dev/null
@@ -0,0 +1,238 @@
+import time
+import json
+import re
+import os
+import users
+
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+from netaddr import IPAddress
+from decorators_for_robot_functionalities import *
+
+
+log_dir = os.path.join(os.path.dirname(__file__))
+ex = BuiltIn().get_library_instance('execute_command')
+sshlib = ex.get_ssh_library_instance()
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+BuiltIn().import_library('pabot.PabotLib')
+pabot = BuiltIn().get_library_instance('pabot.PabotLib')
+
+
+@robot_log
+def check_host_interfaces(network_properties_dict):
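+    # Networks with a 'host_if' must have exactly one matching interface on each node
+    # (on worker nodes only internal-type networks are checked), while networks
+    # without one must not leave any '<name>.' or '<name>:' interface behind.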
+    for node, node_ip in stack_infos.get_all_nodes().items():
+        logger.info("Checking host interfaces on " + node + ": " + node_ip)
+        ex.ssh_to_another_node(node_ip, users.cloudadmin)
+        try:
+            for network in network_properties_dict:
+                if network_properties_dict[network]['host_if'] != '':
+                    if (node in stack_infos.get_worker_nodes() and
+                            network_properties_dict[network]['iface_type'] != 'int'):
+                        continue
+                    command = "ip a | grep " + network_properties_dict[network]['host_if'] + " | wc -l"
+                    count = ex.execute_unix_command(command)
+                    if count != '1':
+                        raise Exception("host interface check Failed, interface " +
+                                        network_properties_dict[network]['host_if'] +
+                                        " does not exist on " + node + ": " + node_ip)
+                    logger.info("host interface check OK, interface " + network_properties_dict[network]['host_if'] +
+                                " exists on " + node)
+                else:
+                    command = "ip a | grep " + network_properties_dict[network]['name'] + "[.:] | wc -l"
+                    count = ex.execute_unix_command(command)
+                    if count != '0':
+                        raise Exception("host interface check Failed, " + network_properties_dict[network]['name'] +
+                                        " related interface exists on node: " + node + ": " + node_ip)
+                    logger.info("host interface check OK, no unnecessary " + network_properties_dict[network]['name'] +
+                                " related host interface exists on node: " + node + ": " + node_ip)
+        finally:
+            ex.exit_from_user()
+
+
+@robot_log
+def create_resources_from_fetched_chart_templates(template_path):
+    ex.execute_unix_command("kubectl create -f " + template_path, fail_on_non_zero_rc=False)
+
+
+@robot_log
+def delete_all_resources(resource_type):
+    ex.execute_unix_command("kubectl delete " + resource_type + " --all")
+
+
+@robot_log
+def delete_resources_by_manifest_path(path):
+    ex.execute_unix_command("kubectl delete -f " + path)
+
+
+@robot_log
+def get_resource_count(resource_type, resource_name):
+    return ex.execute_unix_command("kubectl get " + resource_type + " 2>/dev/null | grep -w " + resource_name +
+                                   " | wc -l")
+
+
+@robot_log
+def compare_test_data(list_to_compare, dict_to_compare):
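+    # Warns in both directions, so any drift between the fetched chart content and
+    # the test-constant dictionaries shows up in the log without failing the test.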
+    for danmnet in list_to_compare:
+        if danmnet not in dict_to_compare:
+            logger.warn(danmnet + " is not present in test constants: {}".format(dict_to_compare))
+    for key in dict_to_compare:
+        if key not in list_to_compare:
+            logger.warn(key + " is not present in {} chart".format(list_to_compare))
+
+
+@robot_log
+def get_pod_list(kube_object):
+    pod_list = {}
+    command = "kubectl get pod --all-namespaces | grep -w " + kube_object[
+        'obj_name'] + " | awk '{print $1 \" \" $2 \" \" $4 \" \" $5}'"
+    for line in ex.execute_unix_command_as_root(command).split('\r\n'):
+        pod_list[line.split(' ')[1]] = {'namespace': line.split(' ')[0], 'status': line.split(' ')[2],
+                                        'restarts': line.split(' ')[3]}
+    return pod_list
+
+
+@robot_log
+def get_pod_ips(pod_list, skip_restarts=False, if_name='eth0'):
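+    # Collects the IPs of Running pods: with an if_name it reads that interface's
+    # address, while if_name='' lists every address except lo and eth0 (i.e. the
+    # secondary, DANM-managed interfaces).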
+    assigned_ips = []
+    for key in pod_list:
+        if (pod_list[key]['status'] == 'Running') and ((pod_list[key]['restarts'] == '0') or skip_restarts):
+            logger.info(pod_list[key]['namespace'])
+            if if_name != '':
+                command = "kubectl exec " + key + " -n " + pod_list[key]['namespace'] + " ip a | grep " + if_name + \
+                          " | grep inet | awk '{print $2}' | awk -F \"/\" '{print $1}' "
+            else:
+                command = "kubectl exec " + key + " -n " + pod_list[key]['namespace'] + "  -- ip -o a | " \
+                          "grep -vE '(: lo|: eth0)' | grep inet | awk '{print $4}' | awk -F \"/\" '{print $1}'"
+            assigned_ips.append(ex.execute_unix_command_as_root(command))
+    return assigned_ips
+
+
+@robot_log
+def check_mac_address(pod_list, network, prop_dict):
+    command = "ip a | grep -wA 1 " + prop_dict[network]['host_if'] + " | grep ether | awk '{print $2}'"
+    host_mac = ex.execute_unix_command_as_root(command)
+    for pod in pod_list:
+        if (pod_list[pod]['status'] == 'Running') and (pod_list[pod]['restarts'] == '0'):
+            command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " ip a | grep -A 1 eth0 | " \
+                                                                                    "grep link | awk '{print $2}'"
+            pod_mac = ex.execute_unix_command_as_root(command)
+            if host_mac != pod_mac:
+                raise Exception("Wrong Mac address in pod " + pod + "hostmac: " + host_mac + " ; podmac: " + pod_mac)
+            logger.info("Correct mac address in pod " + pod)
+
+
+@robot_log
+def get_alloc_pool(network, dictionary, resource_type):
+    alloc_pool = {}
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -n " + \
+              dictionary[network]['namespace'] + " -o yaml " + \
+              " | grep allocation_pool -A 2 | grep start | awk {'print$2'}"
+    alloc_pool['start'] = ex.execute_unix_command_as_root(command)
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -n " + \
+              dictionary[network]['namespace'] + " -o yaml " + \
+              " | grep allocation_pool -A 2 | grep end | awk {'print$2'}"
+    alloc_pool['end'] = ex.execute_unix_command_as_root(command)
+    return alloc_pool
+
+
+@robot_log
+def check_dynamic_ips(alloc_pool, assigned_ips):
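+    # netaddr.IPAddress instances are ordered, so two comparisons verify each
+    # dynamically allocated IP falls inside the pool; the set-length check catches
+    # duplicate allocations.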
+    for ip in assigned_ips:
+        if (IPAddress(alloc_pool['start']) > IPAddress(ip)) or (IPAddress(ip) > IPAddress(alloc_pool['end'])):
+            raise Exception("Dynamic ip: {} is not in allocation pool: {} - {}".format(ip, alloc_pool['start'],
+                                                                                       alloc_pool['end']))
+    logger.info("All dynamic ips are from the allocation pool.")
+    if len((set(assigned_ips))) != len(assigned_ips):
+        raise Exception("duplicated IPs assigned")
+    logger.info("All allocated IPs are unique")
+
+
+@robot_log
+def check_static_routes(pod_list, network, properties_dict):
+    for pod in pod_list:
+        if (pod_list[pod]['status'] == 'Running') and (pod_list[pod]['restarts'] == '0'):
+            command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " route | grep " + \
+                      properties_dict[network]['routes'].split('/')[0] + " | grep " + \
+                      properties_dict[network]['routes'].split(' ')[1] + " | wc -l"
+            res = ex.execute_unix_command_as_root(command)
+            if res != '1':
+                raise Exception("static route in pod " + pod + " does not match with route defined in " + network)
+            logger.info("Static route in pod " + pod + " is as it should be.")
+
+
+@robot_log
+def check_connectivity(pod_list, pod, ip_list):
+    for ip in ip_list:
+        command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " -- sh -c \"ping -c 1 " + ip + "\""
+        stdout = ex.execute_unix_command_as_root(command)
+        if '0% packet loss' not in stdout:
+            raise Exception("pod " + pod + " cannot reach ip " + ip)
+        logger.info("pod " + pod + " can reach ip " + ip)
+
+
+@robot_log
+def check_danmnet_endpoints_deleted(kube_object, network, properties_dict, assigned_ips):
+    for ip in assigned_ips:
+        command = "kubectl get danmep -n " + kube_object['namespace'] + " -o yaml | grep -B 10 " + \
+                  properties_dict[network]['name'] + " | grep " + ip + " | wc -l"
+        res = ex.execute_unix_command_as_root(command)
+        if res != '0':
+            raise Exception("Endpoint with ip " + ip + " still exists.")
+    logger.info("The necessary endpoints are cleared")
+
+
+@robot_log
+def get_alloc_value(network, dictionary, resource_type):
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -o yaml | grep -w alloc | " \
+                                                                                   "awk '{print $2}'"
+    alloc = ex.execute_unix_command_as_root(command)
+    return alloc
+
+
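+# Retries up to five times, 30 seconds apart, for the flannel IP reservation files
+# under /var/lib/cni/networks/cbr0/ to drop back to the pre-test count after the
+# pods are deleted.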
+def check_danm_count(ip_count_before, cbr0_content_before, tries):
+    if tries == 5:
+        raise Exception("Flannel ips are not cleared after pod deletion")
+    tries = tries + 1
+    command = "ls -lrt /var/lib/cni/networks/cbr0/ | wc -l"
+    ip_count_after = ex.execute_unix_command_as_root(command)
+    command = "ls -lrt /var/lib/cni/networks/cbr0/"
+    cbr0_content_after = ex.execute_unix_command_as_root(command)
+    if ip_count_before != ip_count_after:
+        logger.info(cbr0_content_before)
+        logger.info(cbr0_content_after)
+        time.sleep(30)
+        check_danm_count(ip_count_before, cbr0_content_before, tries)
+
+
+@robot_log
+def check_dep_count(namespace, exp_count, test_pod_name_pattern=r'^danmnet-pods'):
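+    # 'dep' objects are DanmEp custom resources (DANM endpoints); poll up to five
+    # times, 20 seconds apart, until the endpoints owned by the test pods reach the
+    # expected count.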
+    tries = 0
+    deps = get_deps(namespace)
+    danmnet_test_deps = [dep for dep in deps if is_dep_belongs_to_pod(dep, test_pod_name_pattern)]
+    while (tries < 5) and (len(danmnet_test_deps) != exp_count):
+        time.sleep(20)
+        tries += 1
+        deps = get_deps(namespace)
+        danmnet_test_deps = [dep for dep in deps if is_dep_belongs_to_pod(dep, test_pod_name_pattern)]
+
+    if len(danmnet_test_deps) != exp_count:
+        raise Exception("Danm endpoint count is not as expected! Got: " + str(len(danmnet_test_deps)) + ", expected: " +
+                        str(exp_count))
+    logger.info("Danm endpoint count is as expected.")
+
+
+@robot_log
+def get_deps(namespace):
+    command = "kubectl get dep -n {} -o json".format(namespace)
+    deps_text = ex.execute_unix_command_as_root(command)
+    return json.loads(deps_text).get("items")
+
+
+@robot_log
+def is_dep_belongs_to_pod(dep, pod_pattern):
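+    # A DanmEp records its owner pod's name in the 'Pod' field of its spec.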
+    pod_name = dep["spec"]["Pod"]
+    return bool(re.search(pod_pattern, pod_name))
diff --git a/testcases/danm_network_check/tc_001_danmnet_object_check.py b/testcases/danm_network_check/tc_001_danmnet_object_check.py
new file mode 100644 (file)
index 0000000..f667615
--- /dev/null
@@ -0,0 +1,140 @@
+import sys
+import os
+import danm_utils
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+from execute_command import execute_command  # noqa
+
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+infra_int_if = stack_infos.get_infra_int_if()
+infra_ext_if = stack_infos.get_infra_ext_if()
+infra_storage_if = stack_infos.get_infra_storage_if()
+
+
+def tc_001_danmnet_object_check():
+    steps = ['step1_inspect_clusternetworks',
+             'step2_inspect_tenantconfigs',
+             'step3_inspect_tenantnetworks']
+    BuiltIn().run_keyword("tc_001_danmnet_object_check.Setup")
+    common_utils.keyword_runner(steps)
+
+
+def Setup():
+    cnet_test = common_utils.get_helm_chart_content("default/clusternetwork-test")
+    danm_utils.compare_test_data(cnet_test, clusternetworks_properties)
+
+    cnet_test_error = common_utils.get_helm_chart_content("default/clusternetwork-test-error")
+    danm_utils.compare_test_data(cnet_test_error, clusternetworks_error_properties)
+
+    tenantconfig_test = common_utils.get_helm_chart_content("default/tenantconfig-test")
+    danm_utils.compare_test_data(tenantconfig_test, tenantconfig_properties)
+
+    tenantconfig_test_error = common_utils.get_helm_chart_content("default/tenantconfig-test-error")
+    danm_utils.compare_test_data(tenantconfig_test_error, tenantconfig_error_properties)
+
+    tenantnetwork_test = common_utils.get_helm_chart_content("default/tenantnetwork-test")
+    danm_utils.compare_test_data(tenantnetwork_test, tenantnetwork_properties)
+
+    tenantnetwork_test_error = common_utils.get_helm_chart_content("default/tenantnetwork-test-error")
+    danm_utils.compare_test_data(tenantnetwork_test_error, tenantnetwork_error_properties)
+
+
+def step1_inspect_clusternetworks():
+    logger.info("Deploying valid ClusterNetwork manifests fetched from helm chart \'clusternetwork-test\'.")
+    replace_ifaces_in_fetched_chart_templates("/tmp/clusternetwork-test/templates/*")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/clusternetwork-test/templates")
+    for cnet in clusternetworks_properties:
+        cnet_name = clusternetworks_properties[cnet]['name']
+        count = danm_utils.get_resource_count(resource_type="clusternetwork", resource_name=cnet_name)
+        if count == '0':
+            raise Exception("ClusterNetwork " + cnet_name + " does not exist, but it should!")
+        logger.info("ClusterNetwork " + cnet_name + " exists as expected.")
+    danm_utils.check_host_interfaces(clusternetworks_properties)
+
+    logger.info("Deploying invalid ClusterNetwork manifests fetched from helm chart \'clusternetwork-test-error\'."
+                " All should fail.")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/clusternetwork-test-error/templates")
+    for cnet in clusternetworks_error_properties:
+        cnet_name = clusternetworks_error_properties[cnet]['name']
+        count = danm_utils.get_resource_count(resource_type="clusternetworks", resource_name=cnet_name)
+        if count != '0':
+            raise Exception("ClusterNetwork " + cnet_name + " exists, but it should not!")
+        logger.info("ClusterNetwork " + cnet_name + " does not exist, as expected.")
+
+    danm_utils.delete_resources_by_manifest_path("/tmp/clusternetwork-test/templates")
+
+
+def step2_inspect_tenantconfigs():
+    logger.info("Deploying valid TenantConfig manifests fetched from helm chart \'tenantconfig-test\'.")
+    replace_ifaces_in_fetched_chart_templates("/tmp/tenantconfig-test/templates/*")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantconfig-test/templates")
+    for tconf in tenantconfig_properties:
+        tconf_name = tenantconfig_properties[tconf]['name']
+        count = danm_utils.get_resource_count(resource_type="tenantconfig", resource_name=tconf_name)
+        if count == '0':
+            raise Exception("TenantConfig " + tconf_name + " does not exist, but it should!")
+        logger.info("TenantConfig " + tconf_name + " exists as expected.")
+
+    logger.info("Deploying invalid TenantConfig manifests fetched from helm chart \'tenantconfig-test-error\'. "
+                "All should fail.")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantconfig-test-error/templates")
+    for tconf in tenantconfig_error_properties:
+        tconf_name = tenantconfig_error_properties[tconf]['name']
+        count = danm_utils.get_resource_count(resource_type="tenantconfig", resource_name=tconf_name)
+        if count != '0':
+            raise Exception("TenantConfig " + tconf_name + " exists, but it shouldn't!")
+        logger.info("TenantConfig " + tconf_name + " does not exist, as expected.")
+
+
+@pabot_lock("health_check_1")
+def step3_inspect_tenantnetworks():
+    danm_utils.delete_all_resources("tenantconfig")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantconfig-test/templates/tconf_05.yaml")
+
+    # TenantNetwork-s with TenantConfig without vlan/vxlan
+    logger.info("Deploying valid TenantNetwork manifests fetched from helm chart \'tenantnetwork-test\'.")
+    replace_ifaces_in_fetched_chart_templates("/tmp/tenantnetwork-test/templates/*")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-test/templates")
+    for tnet in tenantnetwork_properties:
+        tnet_name = tenantnetwork_properties[tnet]['name']
+        count = danm_utils.get_resource_count(resource_type="tenantnetwork", resource_name=tnet_name)
+        if count == '0':
+            raise Exception("TenantNetwork " + tnet_name + " does not exist, but it should!")
+        logger.info("TenantNetwork " + tnet_name + " exists as expected.")
+
+    logger.info("Deploying invalid TenantNetwork manifests fetched from helm chart \'tenantnetwork-test-error\'. "
+                "All should fail.")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-test-error/templates")
+    for tnet in tenantnetwork_error_properties:
+        tnet_name = tenantnetwork_error_properties[tnet]['name']
+        count = danm_utils.get_resource_count(resource_type="tenantnetwork", resource_name=tnet_name)
+        if count != '0':
+            raise Exception("TenantNetwork " + tnet_name + " exists, but it shouldn't!")
+        logger.info("TenantNetwork " + tnet_name + " does not exist, as expected.")
+
+    danm_utils.delete_resources_by_manifest_path("/tmp/tenantnetwork-test/templates")
+    # TenantNetwork-s with TenantConfig with vlan/vxlan
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantconfig-test/templates/tconf_07.yaml")
+    danm_utils.delete_resources_by_manifest_path("/tmp/tenantconfig-test/templates/tconf_05.yaml")
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-test/templates/")
+    danm_utils.check_host_interfaces(tenantnetwork_properties)
+
+    # cleanup after ourselves
+    danm_utils.delete_resources_by_manifest_path("/tmp/tenantnetwork-test/templates")
+    # redeploy the default TenantConfig 'danm-tenant-config' after we finish
+    execute.execute_unix_command("kubectl create -f /var/lib/caas/manifests/danm-tenant-config.yaml")
+    danm_utils.delete_resources_by_manifest_path("/tmp/tenantconfig-test/templates/tconf_07.yaml")
+
+
+@robot_log
+def replace_ifaces_in_fetched_chart_templates(path):
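+    # The fetched chart files are raw Helm templates, so the {{ .Values.* }} interface
+    # placeholders are substituted with the stack's real interface names before use.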
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_int_if }}/" + infra_int_if + "/g' " + path)
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_ext_if }}/" + infra_ext_if + "/g' " + path)
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_storage_if }}/" + infra_storage_if + "/g' " + path)
diff --git a/testcases/danm_network_check/tc_002_tenantnetwork_pod_check.py b/testcases/danm_network_check/tc_002_tenantnetwork_pod_check.py
new file mode 100644 (file)
index 0000000..6ff3a66
--- /dev/null
@@ -0,0 +1,310 @@
+import sys
+import os
+
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+import danm_utils  # noqa
+from execute_command import execute_command  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+infra_int_if = stack_infos.get_infra_int_if()
+infra_ext_if = stack_infos.get_infra_ext_if()
+infra_storage_if = stack_infos.get_infra_storage_if()
+
+
+def tc_002_tenantnetwork_pod_check():
+    steps = ['step1_check_static_ip_allocations',
+             'step2_check_dynamic_ip_shortage',
+             'step3_check_static_ip_shortage',
+             'step4_check_attach_in_kubesystem_namespace',
+             'step5_check_static_ip_alloc_static_routes_success_after_purge',
+             'step6_check_step4_deletion_success',
+             'step7_check_static_ip_alloc_outside_cidr',
+             'step8_check_ip_alloc_with_cidrless_allocpoolless_tenantnet',
+             'step9_check_connection_to_flannel_and_ipvlan_tenantnetworks',
+             'step10_check_service_reachability_with_flannel',
+             'step11_check_flannel_static_ip_alloc_not_in_flannel_cidr_ignored',
+             'step12_none_ip_pod_restart_loop',
+             'step13_check_invalid_net_attach_and_successful_damnep_ip_release_after_retries',
+             'step14_check_realloc_ips_of_prev_step_with_dynamic_and_none_ip_alloc',
+             'tc_002_tenantnetwork_pod_check.Teardown']
+
+    BuiltIn().run_keyword("tc_002_tenantnetwork_pod_check.Setup")
+    common_utils.keyword_runner(steps)
+
+
+@pabot_lock("health_check_1")
+def Setup():
+    tennet_attach_test = common_utils.get_helm_chart_content("default/tenantnetwork-attach-test")
+    danm_utils.compare_test_data(tennet_attach_test, tenantnetwork_attach_properties)
+
+    replace_ifaces_in_fetched_chart_templates("/tmp/tenantnetwork-attach-test/templates/*")
+    replace_ifaces_in_fetched_chart_templates("/tmp/tenantconfig-test/templates/*")
+    # deploy a valid TenantConfig
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantconfig-test/templates/tconf_05.yaml")
+    # remove the default TenantConfig
+    execute.execute_unix_command("kubectl delete tenantconfig danm-tenant-config")
+    # deploy all TenantNetwork-s
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-attach-test/templates")
+    set_expected_host_if_in_constants(tenantnetwork_attach_properties)
+
+
+@pabot_lock("health_check_1")
+def Teardown():
+    execute.execute_unix_command("kubectl create -f /var/lib/caas/manifests/danm-tenant-config.yaml")
+    execute.execute_unix_command("kubectl delete tenantconfig tconf-05")
+    execute.execute_unix_command("kubectl delete -f /tmp/tenantnetwork-attach-test/templates/")
+
+
+def step1_check_static_ip_allocations():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod1", release_name="tenantnetwork-attach-pod1")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod1,
+                                                expected_result=tennet_pod1['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=60,
+                                                delay=10)
+    pod_list = danm_utils.get_pod_list(tennet_pod1)
+    if set(tennet_pod1['ip_list']) != set(danm_utils.get_pod_ips(pod_list)):
+        raise Exception("Static ip allocation for tenantnetwork-attach-pod1 was unsuccessful!")
+    logger.info("Static ips allocated successfully!")
+    danm_utils.check_mac_address(pod_list, 'tennet_attach_01', tenantnetwork_attach_properties)
+
+
+def step2_check_dynamic_ip_shortage():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod2", release_name="tenantnetwork-attach-pod2")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod2,
+                                                expected_result=tennet_pod2['obj_count'],
+                                                filter=r'(ContainerCreating)\s*[0]',
+                                                timeout=60)
+    alloc_pool = danm_utils.get_alloc_pool('tennet_attach_01', tenantnetwork_attach_properties, 'tenantnetwork')
+    danm_utils.check_dynamic_ips(alloc_pool, tennet_pod2['ip_list'])
+
+
+def step3_check_static_ip_shortage():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod3", release_name="tenantnetwork-attach-pod3")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod3,
+                                                expected_result=tennet_pod3['obj_count'],
+                                                filter=r'(ContainerCreating)\s*[0]',
+                                                timeout=30)
+    common_utils.helm_delete("tenantnetwork-attach-pod2")
+    common_utils.check_kubernetes_object(kube_object=tennet_pod2,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=60)
+    common_utils.helm_delete("tenantnetwork-attach-pod1")
+
+
+def step4_check_attach_in_kubesystem_namespace():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod4", release_name="tenantnetwork-attach-pod4")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod4,
+                                                expected_result=tennet_pod4['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=60,
+                                                delay=10)
+    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_02", tenantnetwork_attach_properties, 'tenantnetwork')
+    danm_utils.check_dynamic_ips(alloc_pool, tennet_pod4['ip_list'])
+    common_utils.helm_delete(release_name="tenantnetwork-attach-pod4")
+
+
+def step5_check_static_ip_alloc_static_routes_success_after_purge():
+    common_utils.check_kubernetes_object(kube_object=tennet_pod1,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=60)
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod3,
+                                                expected_result=tennet_pod3['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=60)
+    pod_list = danm_utils.get_pod_list(tennet_pod3)
+    if set(tennet_pod3['ip_list']) != set(danm_utils.get_pod_ips(pod_list)):
+        raise Exception("Static ip allocation for tenantnetwork-attach-pod3 was unsuccessful!")
+    logger.info("Static ips allocated successfully!")
+
+    danm_utils.check_static_routes(pod_list, 'tennet_attach_01', tenantnetwork_attach_properties)
+
+    danm_utils.check_connectivity(pod_list, list(pod_list)[0], tennet_pod3['ip_list'])
+    danm_utils.check_connectivity(pod_list, list(pod_list)[3], tennet_pod3['ip_list'])
+
+
+@robot_log
+def step6_check_step4_deletion_success():
+    common_utils.check_kubernetes_object(kube_object=tennet_pod4,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=60)
+    danm_utils.check_danmnet_endpoints_deleted(tennet_pod4, 'tennet_attach_02', tenantnetwork_attach_properties,
+                                               tennet_pod4['ip_list'])
+
+
+@robot_log
+def step7_check_static_ip_alloc_outside_cidr():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod5", release_name="tenantnetwork-attach-pod5")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod5,
+                                                expected_result=tennet_pod5['obj_count'],
+                                                filter=r'(ContainerCreating)\s*[0]',
+                                                timeout=90)
+
+
+@robot_log
+def step8_check_ip_alloc_with_cidrless_allocpoolless_tenantnet():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod6", release_name="tenantnetwork-attach-pod6")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod6,
+                                                expected_result=tennet_pod6['obj_count'],
+                                                filter=r'(ContainerCreating)\s*[0]',
+                                                timeout=90)
+
+
+@robot_log
+def step9_check_connection_to_flannel_and_ipvlan_tenantnetworks():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod7", release_name="tenantnetwork-attach-pod7")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod7,
+                                                expected_result=tennet_pod7['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=90)
+
+    pod_list = danm_utils.get_pod_list(tennet_pod7)
+    danm_utils.check_dynamic_ips(tenantnetwork_attach_properties['tennet_attach_04']['flannel_pool'],
+                                 danm_utils.get_pod_ips(pod_list))
+
+    alloc_pool = danm_utils.get_alloc_pool('tennet_attach_03', tenantnetwork_attach_properties, 'tenantnetwork')
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name=''))
+
+
+@robot_log
+def step10_check_service_reachability_with_flannel():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod8", release_name="tenantnetwork-attach-pod8")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod8,
+                                                expected_result=tennet_pod8['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=90)
+    command = "curl tennet-pod-08.default.svc.rec.io:4242"
+    res = execute.execute_unix_command_as_root(command)
+    if "OK" not in res:
+        raise Exception("NOK: tennet-pod-08 service is not reachable")
+    logger.info("OK: tennet-pod-08 service is reachable")
+    pod_list = danm_utils.get_pod_list(tennet_pod8)
+    assigned_ips = danm_utils.get_pod_ips(pod_list)
+    danm_utils.check_dynamic_ips(tenantnetwork_attach_properties['tennet_attach_04']['flannel_pool'], assigned_ips)
+
+
+def step11_check_flannel_static_ip_alloc_not_in_flannel_cidr_ignored():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod9", release_name="tenantnetwork-attach-pod9")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod9,
+                                                expected_result=tennet_pod9['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=90)
+
+
+def step12_none_ip_pod_restart_loop():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod10",
+                              release_name="tenantnetwork-attach-pod10")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod10,
+                                                expected_result=tennet_pod10['obj_count'],
+                                                filter=r'(ContainerCreating)\s*[0]',
+                                                timeout=90)
+    common_utils.helm_delete("tenantnetwork-attach-pod3")
+    common_utils.helm_delete("tenantnetwork-attach-pod5")
+    common_utils.helm_delete("tenantnetwork-attach-pod6")
+    common_utils.helm_delete("tenantnetwork-attach-pod7")
+    common_utils.helm_delete("tenantnetwork-attach-pod8")
+    common_utils.helm_delete("tenantnetwork-attach-pod9")
+    common_utils.helm_delete("tenantnetwork-attach-pod10")
+
+
+def step13_check_invalid_net_attach_and_successful_damnep_ip_release_after_retries():
+    tnet1_alloc_before = danm_utils.get_alloc_value('tennet_attach_01', tenantnetwork_attach_properties,
+                                                    "tenantnetwork")
+    tnet5_alloc_before = danm_utils.get_alloc_value('tennet_attach_05', tenantnetwork_attach_properties,
+                                                    "tenantnetwork")
+    tnet6_alloc_before = danm_utils.get_alloc_value('tennet_attach_06', tenantnetwork_attach_properties,
+                                                    "tenantnetwork")
+    common_utils.get_helm_chart_content("default/tenantnetwork-attach-pod11")
+    common_utils.get_helm_chart_content("default/tenantnetwork-attach-pod13")
+
+    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " +
+                                 "/tmp/tenantnetwork-attach-pod11/templates/tennet_pod_11.yaml")
+    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " +
+                                 "/tmp/tenantnetwork-attach-pod13/templates/tennet_pod_13.yaml")
+
+    for _ in range(0, 10):
+        danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-attach-pod11/templates")
+        danm_utils.create_resources_from_fetched_chart_templates("/tmp/tenantnetwork-attach-pod13/templates")
+        common_utils.test_kubernetes_object_quality(kube_object=tennet_pod11,
+                                                    expected_result=tennet_pod11['obj_count'],
+                                                    filter=r'(ContainerCreating)\s*[0]',
+                                                    timeout=40)
+        common_utils.test_kubernetes_object_quality(kube_object=tennet_pod13,
+                                                    expected_result=tennet_pod13['obj_count'],
+                                                    filter=r'(ContainerCreating)\s*[0]',
+                                                    timeout=40)
+        danm_utils.delete_resources_by_manifest_path("/tmp/tenantnetwork-attach-pod11/templates")
+        danm_utils.delete_resources_by_manifest_path("/tmp/tenantnetwork-attach-pod13/templates")
+        common_utils.check_kubernetes_object(kube_object=tennet_pod11,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=40)
+        common_utils.check_kubernetes_object(kube_object=tennet_pod13,
+                                             tester_function=common_utils.test_kubernetes_object_not_available,
+                                             timeout=40)
+
+    tnet1_alloc_after = danm_utils.get_alloc_value('tennet_attach_01', tenantnetwork_attach_properties, 'tenantnetwork')
+    tnet5_alloc_after = danm_utils.get_alloc_value('tennet_attach_05', tenantnetwork_attach_properties, 'tenantnetwork')
+    tnet6_alloc_after = danm_utils.get_alloc_value('tennet_attach_06', tenantnetwork_attach_properties, 'tenantnetwork')
+    if tnet1_alloc_before != tnet1_alloc_after:
+        raise Exception("allocation value in tennet_attach_01 is not as expected")
+    if tnet5_alloc_before != tnet5_alloc_after:
+        raise Exception("allocation value in tennet_attach_05 is not as expected")
+    if tnet6_alloc_before != tnet6_alloc_after:
+        raise Exception("allocation value in tennet_attach_06 is not as expected")
+    danm_utils.check_dep_count('default', exp_count=0)
+
+
+def step14_check_realloc_ips_of_prev_step_with_dynamic_and_none_ip_alloc():
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod12",
+                              release_name="tenantnetwork-attach-pod12")
+    common_utils.helm_install(chart_name="default/tenantnetwork-attach-pod14",
+                              release_name="tenantnetwork-attach-pod14")
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod12,
+                                                expected_result=tennet_pod12['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=90)
+    pod_list = danm_utils.get_pod_list(tennet_pod12)
+    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01", tenantnetwork_attach_properties, "tenantnetwork")
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_1'))
+    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_05", tenantnetwork_attach_properties, "tenantnetwork")
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))
+
+    common_utils.test_kubernetes_object_quality(kube_object=tennet_pod14,
+                                                expected_result=tennet_pod14['obj_count'],
+                                                filter=r'(Running)\s*[0]',
+                                                timeout=90)
+    pod_list = danm_utils.get_pod_list(tennet_pod14)
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet5'))
+    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_06", tenantnetwork_attach_properties, "tenantnetwork")
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='eth0'))
+    alloc_pool = danm_utils.get_alloc_pool("tennet_attach_01", tenantnetwork_attach_properties, "tenantnetwork")
+    danm_utils.check_dynamic_ips(alloc_pool, danm_utils.get_pod_ips(pod_list, if_name='tnet_2'))
+    common_utils.helm_delete("tenantnetwork-attach-pod12")
+    common_utils.helm_delete("tenantnetwork-attach-pod14")
+    common_utils.check_kubernetes_object(kube_object=tennet_pod12,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=20)
+    common_utils.check_kubernetes_object(kube_object=tennet_pod14,
+                                         tester_function=common_utils.test_kubernetes_object_not_available,
+                                         timeout=20)
+    danm_utils.check_dep_count(tennet_pod12["namespace"], exp_count=0)
+
+
+@robot_log
+def replace_ifaces_in_fetched_chart_templates(path):
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_int_if }}/" + infra_int_if + "/g' " + path)
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_ext_if }}/" + infra_ext_if + "/g' " + path)
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_storage_if }}/" + infra_storage_if + "/g' " + path)
+
+
+# TODO: figure out sg for host_if verif, make all v(x)lan and fill expected res in prop_dict
+@robot_log
+def set_expected_host_if_in_constants(properties_dict):
+    for elem in properties_dict:
+        properties_dict[elem]['host_if'] = infra_int_if
diff --git a/testcases/danm_network_check/tc_003_clusternetwork_pod_check.py b/testcases/danm_network_check/tc_003_clusternetwork_pod_check.py
new file mode 100644 (file)
index 0000000..dbc3bd4
--- /dev/null
@@ -0,0 +1,526 @@
+import sys
+import os
+import time
+import json
+import re
+import danm_utils
+from robot.api import logger
+from robot.libraries.BuiltIn import BuiltIn
+from netaddr import IPAddress
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from execute_command import execute_command  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+
+execute = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+flannel_pool = {'start': '10.244.0.1', 'end': '10.244.255.254'}
+static_ips = ["10.5.1.11", "10.5.1.19", "10.5.1.20", "10.5.255.254"]
+infra_int_if = stack_infos.get_infra_int_if()
+infra_ext_if = stack_infos.get_infra_ext_if()
+infra_storage_if = stack_infos.get_infra_storage_if()
+
+
+def tc_003_clusternetwork_pod_check():
+    """\r
+        danmnet_pods1: pods attached to d_test-net2 with static ips\r
+        danmnet_pods2: pods attached to d_test-net2 with dynamic ips\r
+        danmnet_pods3: pods attached to d_test-net2 with the same static ips as danmnet-pods1\r
+        danmnet_pods4: pods attached to ks_test-net2 with dynamic ips (kube-system namespace)\r
+        danmnet_pods5: pod attached to d_test-net2 with static ip, ip is not in CIDR\r
+        danmnet_pods6: pods attached to d_test-net1 with dynamic ips (no CIDR/alloc pool is defined in test-net1 )\r
+        danmnet_pods7: pods attached to d_test-net24(flannel) and d_test-net7(ipvlan) networks with dynamic ip\r
+        danmnet_pods8: pods attached to d_test-net24(flannel) with dynamic ip and service defined\r
+        danmnet_pods9: pods attached to d_test-net24(flannel) with static ip(ignored)\r
+        danmnet_pods10: pods attached to d_test-net2 with none ip\r
+        danmnet_pods11: pod attached to d_test-net30 with static ip, d_test-net8 with none ip, none existing\r
+        danmnet(error)\r
+        danmnet_pods12: pod attached to d_test-net30 with static ip, d_test-net8 with dynamic ip, d_test-net25 with\r
+        none ip\r
+        danmnet_pods13: pod attached to d_test-net8 with static ip, d_test-net24(flannel) with dynamic ip, none existing\r
+        danmnet(error)\r
+        danmnet_pods14: pod attached to d_test-net25 with static ip, d_test-net24(flannel) with dynamic ip\r
+\r
+        danmnet_pods1: pods attached to cnet-pod1 with static ips\r
+        danmnet_pods2: pods attached to cnet-pod1 with dynamic ips\r
+        danmnet_pods3: pods attached to cnet-pod1 with the same static ips as danmnet-pods1\r
+        danmnet_pods4: pods attached to ks_test-net2 with dynamic ips (kube-system namespace)\r
+        danmnet_pods5: pod attached to cnet-pod1 with static ip, ip is not in CIDR\r
+        danmnet_pods6: pods attached to d_test-net1 with dynamic ips (no CIDR/alloc pool is defined in test-net1 )\r
+        danmnet_pods7: pods attached to d_test-net24(flannel) and d_test-net7(ipvlan) networks with dynamic ip\r
+        danmnet_pods8: pods attached to d_test-net24(flannel) with dynamic ip and service defined\r
+        danmnet_pods9: pods attached to d_test-net24(flannel) with static ip(ignored)\r
+        danmnet_pods10: pods attached to cnet-pod1 with none ip\r
+        danmnet_pods11: pod attached to d_test-net30 with static ip, d_test-net8 with none ip,\r
+        none existing danmnet(error)\r
+        danmnet_pods12: pod attached to d_test-net30 with static ip, d_test-net8 with dynamic ip,\r
+        d_test-net25 with none ip\r
+        danmnet_pods13: pod attached to d_test-net8 with static ip, d_test-net24(flannel) with dynamic ip,\r
+        none existing danmnet(error)\r
+        danmnet_pods14: pod attached to d_test-net25 with static ip, d_test-net24(flannel) with dynamic ip\r
+\r
+\r
+    """\r
+    steps = ['step1', 'step2', 'step3', 'step5', 'step7', 'step8', 'step9', 'step10', 'step11', 'step12', 'step13',\r
+             'step14', 'tc_003_clusternetwork_pod_check.Teardown']\r
+\r
+    BuiltIn().run_keyword("tc_003_clusternetwork_pod_check.Setup")\r
+    common_utils.keyword_runner(steps)\r
+\r
+\r
+def Setup():\r
+    # execute.execute_unix_command("kubectl create -f /tmp/clusternetwork-test/templates/cnet_attach.yaml")\r
+    network_attach_test = common_utils.get_helm_chart_content("default/network-attach-test")\r
+    compare_test_data(network_attach_test, network_attach_properties)\r
+    replace_ifaces_in_fetched_chart_templates("/tmp/network-attach-test/templates/*")\r
+    danm_utils.create_resources_from_fetched_chart_templates("/tmp/network-attach-test/templates")\r
+\r
+    install_chart(danmnet_pods1)\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods1,\r
+                                                expected_result=danmnet_pods1['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+    install_chart(danmnet_pods2)\r
+    install_chart(danmnet_pods3)\r
+    # install_chart(danmnet_pods4)\r
+    install_chart(danmnet_pods5)\r
+    install_chart(danmnet_pods6)\r
+    install_chart(danmnet_pods7)\r
+    install_chart(danmnet_pods8)\r
+    install_chart(danmnet_pods9)\r
+    install_chart(danmnet_pods10)\r
+\r
+\r
+def Teardown():\r
+    common_utils.helm_delete("danmnet-pods12")\r
+    common_utils.helm_delete("danmnet-pods14")\r
+    danm_utils.delete_resources_by_manifest_path("/tmp/network-attach-test/templates/")\r
+\r
+\r
+def step1():\r
+    # Install danmnet_pods1: all of the pods should be in Running state, check static ips, mac address\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods1,\r
+                                                expected_result=danmnet_pods1['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90,\r
+                                                delay=10)\r
+    pod_list = get_pod_list(danmnet_pods1)\r
+    danmnet_pods1['ip_list'] = get_pod_ips(pod_list)\r
+    if set(danmnet_pods1['ip_list']) != set(static_ips):\r
+        raise Exception("Static ip allocation for danmnet-pods1 was not successful, assigned ips!")\r
+    logger.info("Static ip allocation for danmnet-pods1 was successful")\r
+    check_mac_address(pod_list, 'cnet_pod1')\r
+\r
+\r
+def step2():\r
+    # Install danmnet_pods2: ips already used from allocation pool -> 3 pods in containercreating state, check remaining\r
+    # assigned ips in allocation pool\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods2,\r
+                                                expected_result=danmnet_pods2['obj_count'],\r
+                                                filter=r'(ContainerCreating)\s*[0]',\r
+                                                timeout=90)\r
+    pod_list = get_pod_list(danmnet_pods2)\r
+    alloc_pool = get_alloc_pool('cnet_pod1', network_attach_properties, 'clusternetwork')\r
+    danmnet_pods2['ip_list'] = get_pod_ips(pod_list)\r
+    check_dynamic_ips(alloc_pool, danmnet_pods2['ip_list'])\r
+\r
+\r
+def step3():\r
+    # Danmnet_pods3 pods are not running because static ips are already allocated\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods3,\r
+                                                expected_result=danmnet_pods3['obj_count'],\r
+                                                filter=r'(ContainerCreating)\s*[0]',\r
+                                                timeout=90)\r
+    # Delete danmnet_pods1, danmnet_pods2\r
+    common_utils.helm_delete("danmnet-pods2")\r
+    common_utils.check_kubernetes_object(kube_object=danmnet_pods2,\r
+                                         tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                         timeout=90)\r
+    common_utils.helm_delete("danmnet-pods1")\r
+\r
+\r
+def step5():\r
+    # Check danmnet_pods1, danmnet_pods2 are purged, ips are reallocated for danmnet_pods3\r
+\r
+    common_utils.check_kubernetes_object(kube_object=danmnet_pods1,\r
+                                         tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                         timeout=90)\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods3,\r
+                                                expected_result=danmnet_pods3['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=60)\r
+    pod_list = get_pod_list(danmnet_pods3)\r
+    assigned_ips = get_pod_ips(pod_list, skip_restarts=True)\r
+    if set(assigned_ips) != set(static_ips):\r
+        raise Exception("Static ip allocation for danmnet-pods3 was not successful!")\r
+    logger.info("Static ip allocation for danmnet-pods3 was successful")\r
+    check_static_routes(pod_list, 'cnet_pod1')\r
+\r
+    actual_pod = list(pod_list)[0]\r
+    check_connectivity(pod_list, actual_pod, static_ips)\r
+    actual_pod = list(pod_list)[3]\r
+    check_connectivity(pod_list, actual_pod, static_ips)\r
+\r
+\r
+def step6():\r
+    common_utils.check_kubernetes_object(kube_object=danmnet_pods4,\r
+                                         tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                         timeout=90)\r
+    check_danmnet_endpoints(danmnet_pods4, 'test-net2', danmnet_pods4['ip_list'])\r
+\r
+\r
+def step7():\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods5,\r
+                                                expected_result=danmnet_pods5['obj_count'],\r
+                                                filter=r'(ContainerCreating)\s*[0]',\r
+                                                timeout=90)\r
+\r
+\r
+def step8():\r
+    # Dynamic ip allocation fails if no CIDR/allocation pool defined\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods6,\r
+                                                expected_result=danmnet_pods6['obj_count'],\r
+                                                filter=r'(ContainerCreating)\s*[0]',\r
+                                                timeout=90)\r
+\r
+\r
+def step9():\r
+    # multiple interfaces, check flannel and ipvlan ip allocation\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods7,\r
+                                                expected_result=danmnet_pods7['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+    pod_list = get_pod_list(danmnet_pods7)\r
+    assigned_ips = get_pod_ips(pod_list)\r
+    check_dynamic_ips(flannel_pool, assigned_ips)\r
+\r
+    alloc_pool = get_alloc_pool('cnet_pod3', network_attach_properties, 'clusternetwork')\r
+    assigned_ips = get_pod_ips(pod_list, if_name='')\r
+    check_dynamic_ips(alloc_pool, assigned_ips)\r
+\r
+\r
+def step10():\r
+    # Check service is reachable with flannel\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods8,\r
+                                                expected_result=danmnet_pods8['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+    command = "curl danmnet-pods8-1.default.svc.rec.io:4242"\r
+    res = execute.execute_unix_command_as_root(command)\r
+    if "OK" not in res:\r
+        raise Exception("NOK: danmnet-pods8-1 service is not reachable")\r
+    logger.info("OK: danmnet-pods8-1 service is reachable")\r
+    pod_list = get_pod_list(danmnet_pods8)\r
+    assigned_ips = get_pod_ips(pod_list)\r
+    check_dynamic_ips(flannel_pool, assigned_ips)\r
+\r
+\r
+def step11():\r
+    # Static ip allocation is ignored with flannel\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods9,\r
+                                                expected_result=danmnet_pods9['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+\r
+\r
+def step12():\r
+    # None ip, pod is restarting\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods10,\r
+                                                expected_result=danmnet_pods10['obj_count'],\r
+                                                filter=r'(ContainerCreating)\s*[0]',\r
+                                                timeout=90)\r
+    common_utils.helm_delete("danmnet-pods3")\r
+    common_utils.helm_delete("danmnet-pods4")\r
+    common_utils.helm_delete("danmnet-pods5")\r
+    common_utils.helm_delete("danmnet-pods6")\r
+    common_utils.helm_delete("danmnet-pods7")\r
+    common_utils.helm_delete("danmnet-pods8")\r
+    common_utils.helm_delete("danmnet-pods9")\r
+    common_utils.helm_delete("danmnet-pods10")\r
+    common_utils.check_kubernetes_object(kube_object=danmnet_pods_all,\r
+                                         tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                         timeout=20)\r
+    check_dep_count(danmnet_pods1["namespace"], exp_count=0)\r
+\r
+\r
+@pabot_lock("pv_test_ip")\r
+@pabot_lock("flannel_ip")\r
+@pabot_lock("flannel_ip2")\r
+def step13():\r
+    # danmnet_pods11 and danmnet_pods13 have invalid networks attached, hence the pod creation will fail;\r
+    # check that danmnet endpoints and ips are cleared after several unsuccessful pod creations\r
+    alloc_before_cnet_pod5 = get_alloc_value('cnet_pod5', network_attach_properties, 'clusternetwork')\r
+    alloc_before_cnet_pod6 = get_alloc_value('cnet_pod6', network_attach_properties, 'clusternetwork')\r
+    common_utils.get_helm_chart_content('default/' + danmnet_pods11['obj_name'])\r
+    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " + "/tmp/" +\r
+                                 danmnet_pods11['obj_name'] + "/templates/" + danmnet_pods11['obj_name'] + ".yaml")\r
+    common_utils.get_helm_chart_content('default/' + danmnet_pods13['obj_name'])\r
+    execute.execute_unix_command("sed -i 's/{{ .Values.registry_url }}/" + reg + "/g' " + "/tmp/" +\r
+                                 danmnet_pods13['obj_name'] + "/templates/" + danmnet_pods13['obj_name'] + ".yaml")\r
+    command = "ls -rt /var/lib/cni/networks/cbr0/ | wc -l"\r
+    ip_count_before = execute.execute_unix_command_as_root(command)\r
+    command = "ls -rt /var/lib/cni/networks/cbr0/"\r
+    cbr0_content1 = execute.execute_unix_command_as_root(command)\r
+\r
+    for _ in range(0, 10):\r
+        # danmnet_pods11 creation fails\r
+        command = "kubectl create -f /tmp/" + danmnet_pods11['obj_name'] + "/templates"\r
+        execute.execute_unix_command_as_root(command)\r
+\r
+        # danmnet_pods13 creation fails\r
+        command = "kubectl create -f /tmp/" + danmnet_pods13['obj_name'] + "/templates"\r
+        execute.execute_unix_command_as_root(command)\r
+        common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods11,\r
+                                                    expected_result=danmnet_pods11['obj_count'],\r
+                                                    filter=r'(ContainerCreating)\s*[0]',\r
+                                                    timeout=40)\r
+        common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods13,\r
+                                                    expected_result=danmnet_pods13['obj_count'],\r
+                                                    filter=r'(ContainerCreating)\s*[0]',\r
+                                                    timeout=40)\r
+        command = "kubectl delete -f /tmp/" + danmnet_pods11['obj_name'] + "/templates"\r
+        execute.execute_unix_command_as_root(command)\r
+        command = "kubectl delete -f /tmp/" + danmnet_pods13['obj_name'] + "/templates"\r
+        execute.execute_unix_command_as_root(command)\r
+        common_utils.check_kubernetes_object(kube_object=danmnet_pods11,\r
+                                             tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                             timeout=40)\r
+        common_utils.check_kubernetes_object(kube_object=danmnet_pods13,\r
+                                             tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                             timeout=40)\r
+    check_danm_count(ip_count_before, cbr0_content1, 0)\r
+    logger.info("All flannel ips are cleared")\r
+    alloc_after_cnet_pod5 = get_alloc_value('cnet_pod5', network_attach_properties, 'clusternetwork')\r
+    alloc_after_cnet_pod6 = get_alloc_value('cnet_pod6', network_attach_properties, 'clusternetwork')\r
+    if alloc_after_cnet_pod6 != alloc_before_cnet_pod6:\r
+        raise Exception("allocation value in cnet-pod6 is not as expected")\r
+    if alloc_after_cnet_pod5 != alloc_before_cnet_pod5:\r
+        raise Exception("allocation value in cnet-pod5 is not as expected")\r
+    check_dep_count('default', exp_count=0)\r
+\r
+\r
+def step14():\r
+    # Static ip, dynamic ip allocation and none ip in the same pod\r
+    # Check if the same ips can be allocated, which were failing in step 13\r
+    install_chart(danmnet_pods12)\r
+    install_chart(danmnet_pods14)\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods12,\r
+                                                expected_result=danmnet_pods12['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+    pod_list = get_pod_list(danmnet_pods12)\r
+    alloc_pool = get_alloc_pool('cnet_pod6', network_attach_properties, 'clusternetwork')\r
+    danmnet_pods12['ip_list'] = get_pod_ips(pod_list, if_name='eth1')\r
+    check_dynamic_ips(alloc_pool, danmnet_pods12['ip_list'])\r
+    danmnet_pods12['ip_list'] = get_pod_ips(pod_list, if_name='eth0')\r
+    if IPAddress(danmnet_pods12['ip_list'][0]) != IPAddress('10.10.0.250'):\r
+        raise Exception("static ip in pod danmnet-pods12 is not as expected")\r
+\r
+    common_utils.test_kubernetes_object_quality(kube_object=danmnet_pods14,\r
+                                                expected_result=danmnet_pods14['obj_count'],\r
+                                                filter=r'(Running)\s*[0]',\r
+                                                timeout=90)\r
+    pod_list = get_pod_list(danmnet_pods14)\r
+    danmnet_pods14['ip_list'] = get_pod_ips(pod_list, if_name='eth2')\r
+    if IPAddress(danmnet_pods14['ip_list'][0]) != IPAddress('10.10.0.254'):\r
+        raise Exception("static ip in pod danmnet-pods14 is not as expected")\r
+    common_utils.helm_delete("danmnet-pods12")\r
+    common_utils.helm_delete("danmnet-pods14")\r
+    common_utils.check_kubernetes_object(kube_object=danmnet_pods14,\r
+                                         tester_function=common_utils.test_kubernetes_object_not_available,\r
+                                         timeout=20)\r
+    check_dep_count(danmnet_pods12["namespace"], exp_count=0)\r
+\r
+\r
+@robot_log\r
+def check_danm_count(ip_count_before_parameter, cbr0_content1_parameter, tries):\r
+    command = "ls -rt /var/lib/cni/networks/cbr0/"\r
+    cbr0_content2 = execute.execute_unix_command_as_root(command)\r
+    if tries == 3:\r
+        diff = list(set(cbr0_content1_parameter.split('\r\n')) - set(cbr0_content2.split('\r\n')))\r
+        logger.info("Additional IPs after step: " + ", ".join(diff))\r
+        for ip in diff:\r
+            command = "cat /var/lib/cni/networks/cbr0/" + ip + " | grep -v eth"\r
+            cid = execute.execute_unix_command(command)\r
+            command = "docker ps -a --no-trunc | grep " + cid\r
+            docker_ps = execute.execute_unix_command(command)\r
+            logger.info("Additional ip belongs to the following container: " + docker_ps)\r
+        raise Exception("Flannel ips are not cleared after pod deletion")\r
+    else:\r
+        tries = tries + 1\r
+    command = "ls -rt /var/lib/cni/networks/cbr0/ | wc -l"\r
+    ip_count_after = execute.execute_unix_command_as_root(command)\r
+    ip_count_before = ip_count_before_parameter\r
+    cbr0_content1 = cbr0_content1_parameter\r
+    if ip_count_before != ip_count_after:\r
+        logger.info(cbr0_content1)\r
+        logger.info(cbr0_content2)\r
+        time.sleep(30)\r
+        check_danm_count(ip_count_before, cbr0_content1, tries)\r
+\r
+\r
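+# Note on check_danm_count above: it re-lists /var/lib/cni/networks/cbr0 and\r
+# retries up to three times, sleeping 30 s between attempts, before declaring\r
+# leaked flannel ips. A rough, hypothetical iterative equivalent (get_ip_count\r
+# is a made-up helper standing in for the "wc -l" command):\r
+#\r
+#   for _ in range(3):\r
+#       if get_ip_count() == ip_count_before:\r
+#           return\r
+#       time.sleep(30)\r
+#   raise Exception("Flannel ips are not cleared after pod deletion")\r
+\r
+\r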
+def install_chart(kube_object):\r
+    common_utils.helm_install(chart_name="default/" + kube_object['obj_name'], release_name=kube_object['obj_name'])\r
+\r
+\r
+@robot_log\r
+def get_pod_ips(pod_list, skip_restarts=False, if_name='eth0'):\r
+    assigned_ips = []\r
+    for key in pod_list:\r
+        if (pod_list[key]['status'] == 'Running') and ((pod_list[key]['restarts'] == '0') or skip_restarts):\r
+            logger.info(pod_list[key]['namespace'])\r
+            if if_name != '':\r
+                command = "kubectl exec " + key + " -n " + pod_list[key]['namespace'] + " ip a | grep " + if_name + \\r
+                          " | grep inet | awk '{print $2}' | awk -F \"/\" '{print $1}' "\r
+            else:\r
+                command = "kubectl exec " + key + " -n " + pod_list[key]['namespace'] + \\r
+                          "  -- ip -o a | grep -vE '(: lo|: eth0)' | grep inet | awk '{print $4}' | " \\r
+                          "awk -F \"/\" '{print $1}'"\r
+            assigned_ips.append(execute.execute_unix_command_as_root(command))\r
+    return assigned_ips\r
+\r
+\r
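+# Example use of get_pod_ips (a sketch mirroring step14; assumes the release is\r
+# installed and Running): collect the eth1 addresses of every healthy pod and\r
+# verify them against an allocation pool:\r
+#\r
+#   pods = get_pod_list(danmnet_pods12)\r
+#   ips = get_pod_ips(pods, if_name='eth1')\r
+#   check_dynamic_ips(get_alloc_pool('cnet_pod6', network_attach_properties,\r
+#                                    'clusternetwork'), ips)\r
+\r
+\r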
+@robot_log\r
+def get_alloc_pool(network, dictionary, resource_type):\r
+    alloc_pool = {}\r
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -o yaml " + \\r
+              " | grep allocation_pool -A 2 | grep start | awk {'print$2'}"\r
+    alloc_pool['start'] = execute.execute_unix_command_as_root(command)\r
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -o yaml " + \\r
+              " | grep allocation_pool -A 2 | grep end | awk {'print$2'}"\r
+    alloc_pool['end'] = execute.execute_unix_command_as_root(command)\r
+    return alloc_pool\r
+\r
+\r
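+# get_alloc_pool returns the parsed allocation range as a dict, for example\r
+# (values are illustrative):\r
+#\r
+#   {'start': '10.10.0.1', 'end': '10.10.0.249'}\r
+#\r
+# check_dynamic_ips below compares each assigned ip against this range with\r
+# netaddr.IPAddress.\r
+\r
+\r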
+@robot_log\r
+def get_pod_list(kube_object):\r
+    pod_list = {}\r
+    command = "kubectl get pod --all-namespaces | grep -w " + kube_object[\r
+        'obj_name'] + " | awk '{print $1 \" \" $2 \" \" $4 \" \" $5}'"\r
+    for line in execute.execute_unix_command_as_root(command).split('\r\n'):\r
+        pod_list[line.split(' ')[1]] = {'namespace': line.split(' ')[0], 'status': line.split(' ')[2],\r
+                                        'restarts': line.split(' ')[3]}\r
+    return pod_list\r
+\r
+\r
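+# get_pod_list parses the kubectl output into a dict keyed by pod name, e.g.\r
+# (illustrative entry):\r
+#\r
+#   {'danmnet-pods1-0': {'namespace': 'default',\r
+#                        'status': 'Running',\r
+#                        'restarts': '0'}}\r
+\r
+\r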
+@robot_log\r
+def check_dynamic_ips(alloc_pool, assigned_ips):\r
+    for ip in assigned_ips:\r
+        if (IPAddress(alloc_pool['start']) > IPAddress(ip)) or (IPAddress(ip) > IPAddress(alloc_pool['end'])):\r
+            raise Exception("Dynamic ip is not in allocation pool")\r
+    logger.info("All dynamic ips are from the allocation pool.")\r
+    if len(list(set(assigned_ips))) != len(assigned_ips):\r
+        raise Exception("duplicated IPs assigned")\r
+    logger.info("All allocated IPs are unique")\r
+\r
+\r
+@robot_log\r
+def check_static_routes(pod_list, danmnet):\r
+    for pod in pod_list:\r
+        if (pod_list[pod]['status'] == 'Running') and (pod_list[pod]['restarts'] == '0'):\r
+            command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " route | grep " + \\r
+                      network_attach_properties[danmnet]['routes'].split('/')[0] + " | grep " + \\r
+                      network_attach_properties[danmnet]['routes'].split(' ')[1] + " | wc -l"\r
+            res = execute.execute_unix_command_as_root(command)\r
+            if res != '1':\r
+                raise Exception("static route in pod " + pod + " does not match with route defined in " + danmnet)\r
+            logger.info("Static route in pod " + pod + " is as it should be.")\r
+\r
+\r
+@robot_log\r
+def check_mac_address(pod_list, network):\r
+    command = "ip a | grep -wA 1 " + network_attach_properties[network]['host_if'] + " | grep ether | awk '{print $2}'"\r
+    host_mac = execute.execute_unix_command_as_root(command)\r
+    for pod in pod_list:\r
+        if (pod_list[pod]['status'] == 'Running') and (pod_list[pod]['restarts'] == '0'):\r
+            command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " ip a | grep -A 1 eth0 | " \\r
+                                                                                    "grep link | awk '{print $2}'"\r
+            pod_mac = execute.execute_unix_command_as_root(command)\r
+            if host_mac != pod_mac:\r
+                raise Exception("Wrong Mac address in pod " + pod)\r
+            logger.info("Correct mac address in pod " + pod)\r
+\r
+\r
+@robot_log\r
+def check_danmnet_endpoints(kube_object, network, assigned_ips):\r
+    for ip in assigned_ips:\r
+        command = "kubectl get danmep -n " + kube_object['namespace'] + " -o yaml | grep -B 10 " + \\r
+                  network_attach_properties[network]['name'] + " | grep " + ip + " | wc -l"\r
+        res = execute.execute_unix_command_as_root(command)\r
+        if res != '0':\r
+            raise Exception("Endpoint with ip " + ip + " still exists.")\r
+    logger.info("The necessary endpoints are cleared")\r
+\r
+\r
+@robot_log\r
+def check_connectivity(pod_list, pod, ip_list):\r
+    for ip in ip_list:\r
+        command = "kubectl exec " + pod + " -n " + pod_list[pod]['namespace'] + " -- sh -c \"ping -c 1 " + ip + "\""\r
+        stdout = execute.execute_unix_command_as_root(command)\r
+        if '0% packet loss' not in stdout:\r
+            raise Exception("pod " + pod + " cannot reach ip " + ip)\r
+        logger.info("pod " + pod + " can reach ip " + ip)\r
+\r
+\r
+@robot_log\r
+def check_dep_count(namespace, exp_count):\r
+    tries = 0\r
+    danm_eps = get_deps(namespace)\r
+    test_pod_name_pattern = r'^danmnet-pods'\r
+    danmnet_test_deps = [dep for dep in danm_eps if is_dep_belongs_to_pod(dep, test_pod_name_pattern)]\r
+    while (tries < 5) and (len(danmnet_test_deps) != exp_count):\r
+        time.sleep(20)\r
+        tries += 1\r
+        danm_eps = get_deps(namespace)\r
+        danmnet_test_deps = [dep for dep in danm_eps if is_dep_belongs_to_pod(dep, test_pod_name_pattern)]\r
+\r
+    if len(danmnet_test_deps) != exp_count:\r
+        raise Exception("Danm endpoint count is not as expected! Got: " + str(len(danmnet_test_deps)) + ", expected: " +\r
+                        str(exp_count))\r
+    logger.info("Danm endpoint count is as expected.")\r
+\r
+\r
+@robot_log\r
+def get_deps(namespace):\r
+    command = "kubectl get dep -n {} -o json".format(namespace)\r
+    deps_text = execute.execute_unix_command_as_root(command)\r
+    return json.loads(deps_text).get("items")\r
+\r
+\r
+@robot_log\r
+def is_dep_belongs_to_pod(dep, pod_pattern):\r
+    pod_name = dep["spec"]["Pod"]\r
+    return bool(re.search(pod_pattern, pod_name))\r
+\r
+\r
+@robot_log\r
+def get_alloc_value(network, dictionary, resource_type):\r
+    command = "kubectl get " + resource_type + " " + dictionary[network]['name'] + " -o yaml | grep -w alloc | " \\r
+                                                                                   "awk '{print $2}'"\r
+    alloc = execute.execute_unix_command_as_root(command)\r
+    return alloc\r
+\r
+\r
+@robot_log\r
+def replace_ifaces_in_fetched_chart_templates(path):\r
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_int_if }}/" + infra_int_if + "/g' " + path)\r
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_ext_if }}/" + infra_ext_if + "/g' " + path)\r
+    execute.execute_unix_command("sed -i 's/{{ .Values.infra_storage_if }}/" + infra_storage_if + "/g' " + path)\r
+\r
+\r
+@robot_log\r
+def compare_test_data(list_to_compare, dict_to_compare):\r
+    for danmnet in list_to_compare:\r
+        if danmnet not in dict_to_compare:\r
+            logger.warn(danmnet + " is not present in test constants: {}".format(dict_to_compare))\r
+    for key in dict_to_compare:\r
+        if key not in list_to_compare:\r
+            logger.warn(key + " is not present in {} chart".format(list_to_compare))\r
+\r
+\r
+@robot_log\r
+def delete_all_resources(resource_type):\r
+    execute.execute_unix_command("kubectl delete " + resource_type + " --all")\r
diff --git a/testcases/fluentd/tc_001_ssh_test_fluentd_logging.py b/testcases/fluentd/tc_001_ssh_test_fluentd_logging.py
new file mode 100644 (file)
index 0000000..26c2549
--- /dev/null
@@ -0,0 +1,82 @@
+import sys
+import os
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+
+
+ex = BuiltIn().get_library_instance('execute_command')
+stack_infos = BuiltIn().get_library_instance('stack_infos')
+log_dir = os.path.join(os.path.dirname(__file__))
+
+
+def tc_001_ssh_test_fluentd_logging():
+    steps = ['step1_fluentd_logging_followup_check']
+    common_utils.keyword_runner(steps)
+
+
+# Get all pods or list of pods running on a node
+def kubernetes_get_all_pods(node, podsList):
+    pods = []
+    for po in podsList:
+        command = "kubectl get po --all-namespaces -o wide | grep " + node + " | grep -vP '"
+        for pod in pods_skipped[:-1]:
+            command += pod + '|'
+        command += pods_skipped[-1] + "' | grep " + po + " | awk '{print $2}'"
+        stdout = ex.execute_unix_command_on_remote_as_root(command, node)
+        for line in stdout.splitlines():
+            pods.append(line)
+    logger.info(pods)
+    return pods
+
+
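+# kubernetes_get_all_pods renders the skip list into one grep -vP alternation.
+# With a hypothetical pods_skipped of ['kube-proxy', 'fluentd'] the generated
+# command is roughly:
+#
+#   kubectl get po --all-namespaces -o wide | grep <node> \
+#       | grep -vP 'kube-proxy|fluentd' | grep kube-system | awk '{print $2}'
+
+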
+# Check whether logs from pods are gathered by fluentd or not
+# - Logs from pods in kube-system are monitored
+# - Logs from glusterfs in default are monitored
+# Looking into fluentd logs for messages like below:
+# "2017-10-17 13:03:14 +0000 [info]: plugin/in_tail.rb:586:initialize: following tail of
+# /var/log/containers/kube-proxy-172.24.16.104_kube-system_kube-proxy-
+# 81ea7d0e0fcfd372ac3cc2a7f980dc7761ede68566b1ef30663cbb1e46307e62.log"
+# meaning that e.g. kube-proxy container log is managed by fluentd
+# The research starts from the first "fluent starting" message and stops at first "restarting" occurrence if any
+def fluentd_logging_followup_check(nodes, followedPods):
+    for key in nodes:
+        command = "kubectl get po --all-namespaces -o wide|grep " + nodes[key] + "|grep fluent|awk '{print $2}'"
+        fluentd = ex.execute_unix_command(command)
+        pods = kubernetes_get_all_pods(nodes[key], followedPods)
+        if fluentd is not None:
+            for pod in pods:
+                command = "kubectl -n kube-system logs " + fluentd + \
+                          "|awk '/starting fluentd/{p=1;next}/restarting/{exit} p'|grep -c 'following.*'" + pod
+                logger.info(command)
+                stdout = ex.execute_unix_command_on_remote_as_root(command, nodes[key])
+                if stdout[0] == '0':
+                    err = key + ": Pod not followed by fluentd: " + pod
+                    raise Exception(err)
+        else:
+            err = key + ": Fluentd pod not found"
+            raise Exception(err)
+
+
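+# The awk window in fluentd_logging_followup_check bounds the search to one
+# fluentd lifetime: printing starts after the first "starting fluentd" line and
+# stops at the first "restarting" line. Expanded, the pipeline looks roughly
+# like this (pod names are placeholders):
+#
+#   kubectl -n kube-system logs <fluentd-pod> \
+#       | awk '/starting fluentd/{p=1;next}/restarting/{exit} p' \
+#       | grep -c 'following.*<followed-pod>'
+
+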
+@pabot_lock("cpu_pooling")
+def step1_fluentd_logging_followup_check():
+    logger.console("\nstep1_fluentd_logging_followup_check")
+
+    nodes = stack_infos.get_crf_nodes()
+    # Monitor all pods in kube-system namespace
+    podsList = ["kube-system"]
+    fluentd_logging_followup_check(nodes, podsList)
+
+    nodes = stack_infos.get_storage_nodes()
+    # Monitor all pods in kube-system namespace
+    podsList = ["kube-system"]
+    fluentd_logging_followup_check(nodes, podsList)
+
+    nodes = stack_infos.get_worker_nodes()
+    # Monitor all pods in kube-system namespace
+    podsList = ["kube-system"]
+    fluentd_logging_followup_check(nodes, podsList)
diff --git a/testcases/fluentd/tc_002_elasticsearch_storage_check.py b/testcases/fluentd/tc_002_elasticsearch_storage_check.py
new file mode 100644 (file)
index 0000000..688266a
--- /dev/null
@@ -0,0 +1,80 @@
+import sys
+import os
+import datetime
+import json
+from robot.libraries.BuiltIn import BuiltIn
+from robot.api import logger
+
+sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
+import common_utils  # noqa
+from decorators_for_robot_functionalities import *  # noqa
+from test_constants import *  # noqa
+from users import *  # noqa
+
+ex = BuiltIn().get_library_instance('execute_command')
+
+
+def tc_002_elasticsearch_storage_check():
+    steps = ['step1_get_elasticsearch_kubernetes_data', 'step2_check_plugins']
+    if check_if_test_should_be_run():
+        common_utils.keyword_runner(steps)
+
+
+def check_if_test_should_be_run():
+    command = "cmcli get-property --property cloud.caas " \
+              "grep '\"infra_log_store\": \"elasticsearch\"' | wc -l"
+    if ex.execute_unix_command(command) != '1':
+        command = "cat {} | grep 'infra_log_store: elasticsearch' | wc -l".format(USER_CONFIG_PATH)
+        return ex.execute_unix_command(command) == '1'
+    return True
+
+
+@robot_log
+def elasticsearch_get_field(field):
+    data = '{ "size": 1, ' \
+           '"query": { ' \
+                '"exists": { "field": "' + field + '" } }, ' \
+                '"sort" : [ {"@timestamp" : {"order" : "desc"}} ] }'
+    header = "Content-Type: application/json"
+    es_index = "_all"
+    url = "{}/{}/_search".format(ELASTICSEARCH_URL, es_index)
+    request = "--header '{}' --request POST --data '{}' {}".format(header, data, url)
+
+    resp = ex.execute_unix_command("curl {}".format(request))
+    return json.loads(resp)
+
+
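+# elasticsearch_get_field issues a standard Elasticsearch _search request; as a
+# plain curl call it would look roughly like this (host and index illustrative):
+#
+#   curl --header 'Content-Type: application/json' --request POST \
+#       --data '{"size": 1,
+#                "query": {"exists": {"field": "kubernetes"}},
+#                "sort": [{"@timestamp": {"order": "desc"}}]}' \
+#       http://elasticsearch:9200/_all/_search
+
+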
+@robot_log
+def elasticsearch_parse_field(msg, field):
+    if 'hits' not in msg:
+        msg = elasticsearch_get_field(field)
+        if 'hits' not in msg:
+            raise Exception('hits key not found in the following input:\n {}'.format(json.dumps(msg)))
+    msglen = len(msg['hits']['hits'])
+    output = {}
+    for i in range(msglen):
+        output['date'] = (msg['hits']['hits'][i]['_source']['@timestamp'])
+        output['tag'] = (msg['hits']['hits'][i]['_source'][field])
+    logger.info(output)
+    return output
+
+
+def step1_get_elasticsearch_kubernetes_data():
+    field = "kubernetes"
+    resp = elasticsearch_get_field(field)
+    output = elasticsearch_parse_field(resp, field)
+    if not output:
+        raise Exception("Logs with field {} not found!".format(field))
+
+
+def is_there_some_plugin(elastic_plugins):
+    return elastic_plugins.find("reindex") != -1
+
+
+def step2_check_plugins():
+    command = "curl http://elasticsearch-logging.kube-system.svc.nokia.net:9200/_cat/plugins?v"
+    elastic_plugins = ex.execute_unix_command_as_root(command)
+    if is_there_some_plugin(elastic_plugins):
+        logger.info("Installed elastic search plugins:" + elastic_plugins)
+    else:
+        raise Exception("No plugin named 'reindex' is installed inside elasticsearch, something not right!")
diff --git a/testcases/parallel_suites/cpu-pooling.robot b/testcases/parallel_suites/cpu-pooling.robot
new file mode 100644 (file)
index 0000000..6558c4f
--- /dev/null
@@ -0,0 +1,35 @@
+*** Settings ***
+Suite Setup       Set Basic Connection  ${cloudadmin}
+Suite Teardown    Close All Connections
+
+Library     ../../libraries/common/stack_infos.py
+Library     ../../libraries/common/execute_command.py
+Variables   ../../libraries/common/users.py
+
+Library     ../cpu_pooling/tc_001_cpu_pool_validation_tests.py
+Library     ../cpu_pooling/tc_002_exclusive_pool_tests.py
+Library     ../cpu_pooling/tc_003_exclusive_pool_tests_more_cpu.py
+Library     ../cpu_pooling/tc_004_shared_cpu_pool_tests.py
+
+Test Timeout    10 minutes
+
+*** Test Cases ***
+CAAS_DPDK_CPU_001
+    [Documentation]    TC 001 CPU Pool Validation Tests
+    [Tags]    CI
+    tc_001_cpu_pool_validation_tests
+
+CAAS_DPDK_CPU_002
+    [Documentation]    TC 002 Exclusive CPU Pool Tests
+    [Tags]    CI
+    tc_002_exclusive_pool_tests
+
+CAAS_DPDK_CPU_003
+    [Documentation]    TC 003 Exclusive CPU Pool Tests More CPU
+    [Tags]    CI
+    tc_003_exclusive_pool_tests_more_cpu
+
+CAAS_DPDK_CPU_004
+    [Documentation]    TC 004 Shared CPU Pool Tests
+    [Tags]    CI
+    tc_004_shared_cpu_pool_tests
\ No newline at end of file
diff --git a/testcases/parallel_suites/danm_network_check.robot b/testcases/parallel_suites/danm_network_check.robot
new file mode 100644 (file)
index 0000000..2c5cf3b
--- /dev/null
@@ -0,0 +1,29 @@
+*** Settings ***\r
+Suite Setup    Set Basic Connection  ${cloudadmin}\r
+Suite Teardown    Close All Connections\r
+\r
+Library     ../../libraries/common/stack_infos.py\r
+Library     ../../libraries/common/execute_command.py\r
+Variables   ../../libraries/common/users.py\r
+\r
+Library     ../danm_network_check/tc_001_danmnet_object_check.py\r
+Library     ../danm_network_check/tc_002_tenantnetwork_pod_check.py\r
+Library     ../danm_network_check/tc_003_clusternetwork_pod_check.py\r
+\r
+Test Timeout    12 minutes\r
+\r
+*** Test Cases ***\r
+CAAS_DANM_001\r
+    [Documentation]    TC 001 Danmnet Check\r
+    [Tags]    CI\r
+    tc_001_danmnet_object_check\r
+\r
+CAAS_DANM_002\r
+    [Documentation]    TC 002 Tenantnetwork Pod Check\r
+    [Tags]    CI\r
+    tc_002_tenantnetwork_pod_check\r
+\r
+CAAS_DANM_003\r
+    [Documentation]    TC 003 Clusternetwork Pod Check\r
+    [Tags]    CI\r
+    tc_003_clusternetwork_pod_check
\ No newline at end of file
diff --git a/testcases/parallel_suites/elasticity_test.robot b/testcases/parallel_suites/elasticity_test.robot
new file mode 100644 (file)
index 0000000..4d09c90
--- /dev/null
@@ -0,0 +1,24 @@
+*** Settings ***\r
+Suite Setup       Set Basic Connection    ${cloudadmin}\r
+Suite Teardown    Close All Connections\r
+\r
+Library           ../../libraries/common/stack_infos.py\r
+Library           ../../libraries/common/execute_command.py\r
+Variables         ../../libraries/common/users.py\r
+\r
+Library           ../HPA_check/HPA_check.py\r
+Library           ../HPA_check/Custom_HPA_check.py\r
+\r
+\r
+*** Test Cases ***\r
+CAAS_ELASTICITY_001\r
+    [Documentation]    HPA check\r
+    [Tags]   hpa    CI\r
+    [Timeout]    7 minutes\r
+    HPA_check\r
+\r
+CAAS_ELASTICITY_002\r
+    [Documentation]    Custom HPA check\r
+    [Tags]   hpa    CI\r
+    [Timeout]    9 minutes\r
+    Custom_HPA_check\r
diff --git a/testcases/parallel_suites/ssh_check.robot b/testcases/parallel_suites/ssh_check.robot
new file mode 100644 (file)
index 0000000..2aa993b
--- /dev/null
@@ -0,0 +1,63 @@
+*** Settings ***
+Suite Setup    Set Basic Connection  ${cloudadmin}
+Suite Teardown    Close All Connections
+
+Library     ../../libraries/common/stack_infos.py
+Library     ../../libraries/common/execute_command.py
+Variables   ../../libraries/common/users.py
+
+Library     ../basic_func_tests/tc_002_pod_health_check.py
+Library     ../basic_func_tests/tc_003_test_registry.py
+Library     ../basic_func_tests/tc_004_ssh_file_check.py
+Library     ../basic_func_tests/tc_005_ssh_dns_server_check.py
+Library     ../basic_func_tests/tc_006_ssh_test_ext_ntp.py
+Library     ../basic_func_tests/tc_007_ssh_test_overlay_quota.py
+Library     ../fluentd/tc_001_ssh_test_fluentd_logging.py
+Library     ../fluentd/tc_002_elasticsearch_storage_check.py
+Library     ../basic_func_tests/tc_008_storage_check.py
+
+
+Test Timeout    3 minutes
+
+*** Test Cases ***
+CAAS_BASIC_FUNC_001
+    [Documentation]    TC 008 Storage Check
+    [Tags]    CI
+    [Timeout]    20 minutes
+    tc_008_storage_check
+
+CAAS_BASIC_FUNC_002
+    [Documentation]    TC 002 pod health check
+    [Tags]    CI
+    [Timeout]    5 minutes
+    tc_002_pod_health_check
+
+CAAS_BASIC_FUNC_003
+    [Documentation]    TC 003 test_registry
+    [Tags]    CI
+    tc_003_test_registry
+
+CAAS_BASIC_FUNC_004
+    [Documentation]    TC 004 SSH file check
+    [Tags]    CI
+    tc_004_ssh_file_check
+
+CAAS_BASIC_FUNC_005
+    [Documentation]    TC 005 SSH DNS server check
+    [Tags]    CI
+    tc_005_ssh_dns_server_check
+
+CAAS_BASIC_FUNC_006
+    [Documentation]    TC 006 SSH Test Ext Ntp
+    [Tags]    CI
+    tc_006_ssh_test_ext_ntp
+
+CAAS_BASIC_FUNC_007
+    [Documentation]    TC 007 SSH Test Overlay Quota
+    [Tags]    CI
+    tc_007_ssh_test_overlay_quota
+
+CAAS_FLUENTD_001
+    [Documentation]    TC 001 ssh test fluentd logging
+    [Tags]    CI
+    tc_001_ssh_test_fluentd_logging
diff --git a/tox.ini b/tox.ini
index 45666fe..7c253ea 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -35,12 +35,13 @@ commands = py.test -v \
            --cov-branch \
            --cov-report term-missing \
            --cov-report html:coverage-html-{envname} \
+           --ignore libraries/common/test_constants.py \
            --ignore testcases/pm-support/misc \
            {posargs:.}
 
 [pytest]
 cache_dir = .pytest-cache
-pep8maxlinelength = 100
+pep8maxlinelength = 120
 
 [testenv:pylint]
 basepython = python2.7
 
@@ -55,6 +56,7 @@ commands = py.test -m pylint -v \
            --pylint \
            --pylint-rcfile={toxinidir}/.pylintrc \
            --ignore resources/system_testing/latency \
+           --ignore libraries/common/test_constants.py \
            --ignore testcases/pm-support/misc \
            {posargs:.}
 