robot tcs, test charts, robot container added
[ta/cloudtaf.git] / testcases / cpu_pooling / tc_004_shared_cpu_pool_tests.py
1 import sys
2 import os
3 import time
4 import yaml
5 from robot.libraries.BuiltIn import BuiltIn
6 from robot.libraries.String import String
7 from robot.api import logger
8 from datetime import datetime
9 from datetime import timedelta
10 from decorators_for_robot_functionalities import *
11 from test_constants import *
12
13 sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
14 import common_utils  # noqa
15
16
# Shared handle to the Robot Framework 'execute_command' library instance,
# used throughout this module to run shell/kubectl commands on the target.
ex = BuiltIn().get_library_instance('execute_command')
# Populated by Setup(): mapping of node name -> CPU pool definitions.
cpupools = {}
# Populated by Setup(): CPU count of the largest 'shared_caas' pool found.
max_shared_pool_size = 0
20
21
def tc_004_shared_cpu_pool_tests():
    """Test-case entry point: run the Setup keyword, then execute the
    shared-CPU-pool test steps in order via the common keyword runner."""
    steps = [
        'step1_shared_passed',
        'step2_shared_fail',
    ]
    BuiltIn().run_keyword("tc_004_shared_cpu_pool_tests.Setup")
    common_utils.keyword_runner(steps)
30
31
def Setup():
    """Populate the module-level fixtures used by the test steps: the target
    node name, the per-node CPU pool map, and the largest shared pool size."""
    global cpupools, max_shared_pool_size, nodename
    nodename = common_utils.decide_nodename()
    cpupools = common_utils.get_cpupools()
    logger.info("CPU pools: {0}".format(cpupools))
    max_shared_pool_size = get_max_shared_cpus_len()
38
39
def step1_shared_passed():
    """Positive case: install the shared-pool chart with a 500m CPU request,
    verify the pod reaches Running, that its measured CPU usage is close to
    the request, and that its resource spec matches shared-pool conventions.
    The chart is always deleted afterwards, even on failure."""
    cpu_request = 500
    chart_values = ("registry_url={url},pool_req={cpu_req},"
                    "nodename={node_name}".format(url=reg,
                                                  cpu_req=cpu_request,
                                                  node_name=nodename))
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-shared1",
                                  release_name="cpu-pooling",
                                  values=chart_values)
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod4,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)
        test_pod_cpu_usage(cpu_pooling_pod4['obj_name'], 90, cpu_request)
        check_cpu_resources(cpu_pooling_pod4['obj_name'])
    finally:
        # Clean up the release and wait until the pod is really gone.
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod4,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
60
61
def step2_shared_fail():
    """Negative case: request more shared CPUs than the biggest pool provides
    and verify the pod stays Pending with an 'Insufficient
    nokia.k8s.io/shared_caas' event. The chart is always deleted afterwards."""
    # 100m more than the largest shared pool can ever satisfy.
    oversized_millicores = (max_shared_pool_size * 1000) + 100
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-shared1",
                                  release_name="cpu-pooling",
                                  values="registry_url={reg_url},pool_req={cpus},nodename={node_name}"
                                  .format(reg_url=reg,
                                          cpus=oversized_millicores,
                                          node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod4,
                                                    expected_result="1",
                                                    filter=r'(Pending)\s*[0]',
                                                    timeout=90,
                                                    delay=3)
        describe_cmd = ('kubectl describe pod {podname} | grep "{check_str}"'
                        .format(podname=cpu_pooling_pod4['obj_name'],
                                check_str='Insufficient nokia.k8s.io/shared_caas'))
        ex.execute_unix_command(describe_cmd)
    finally:
        # Clean up the release and wait until the pod is really gone.
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod4,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
80
81
@robot_log
def test_pod_cpu_usage(pod_name, timeout, threshold):
    """Poll `kubectl top pod` for *pod_name* until it reports non-zero CPU
    usage or *timeout* seconds elapse, then verify the usage is within
    +/-10 millicores of *threshold*.

    Raises Exception when the command keeps failing past the timeout, or
    when the measured usage falls outside the tolerance band.
    """
    # Resolve the full pod name with `kubectl get pod | grep` inline, then
    # query its current usage from metrics-server.
    command = "kubectl top pod `kubectl get pod | grep {name} | awk '{{print $1}}'`".format(name=pod_name)
    result, ec = ex.execute_unix_command(command, fail_on_non_zero_rc=False)
    # NOTE(review): ec is concatenated with strings and compared to "0", so it
    # appears to be the exit code as a string — confirm in execute_command.
    logger.info(ec + " - " + result)
    wait_until = datetime.now() + timedelta(seconds=timeout)
    # Retry while the command fails or the pod still reports "0m" usage
    # (metrics can lag behind pod start-up).
    while (ec != "0" or "0m" in result) and (datetime.now() < wait_until):
        result, ec = ex.execute_unix_command(command, fail_on_non_zero_rc=False)
        logger.info(ec + " - " + result)
        time.sleep(1)
    if ec != "0":
        raise Exception("test_pod_cpu_usage failed: " + result)
    else:
        # Second line, second column of `kubectl top pod` output, e.g. "500m".
        result = result.splitlines()[1].split()[1]
    # Strip the trailing unit character ("m") and enforce the +/-10m tolerance.
    if int(result[:-1]) < threshold - 10 or int(result[:-1]) > threshold + 10:
        raise Exception("CPU usage: {0} - request: {1}m".format(result, threshold))
98
99
def get_max_shared_cpus_len(pools=None):
    """Return the CPU count of the largest 'shared_caas' pool across nodes.

    Args:
        pools: optional mapping of node name -> pool-name -> CPU list.
            Defaults to the module-level ``cpupools`` populated by Setup(),
            preserving the original zero-argument call signature.

    Returns:
        int: size of the biggest 'shared_caas' pool, or 0 when no node
        defines one (or the mapping is empty).
    """
    if pools is None:
        pools = cpupools
    # max(..., default=0) covers both "no nodes" and "no shared_caas pools".
    return max((len(node_pools['shared_caas'])
                for node_pools in pools.values()
                if 'shared_caas' in node_pools),
               default=0)
106
107
@robot_log
def check_cpu_resources(pod_name):
    """Fetch the pod's YAML spec and validate the first container's resources
    for a shared-pool workload: the CPU request must be '0' and the CPU limit
    (in millicores) must equal the nokia.k8s.io/shared_caas limit.

    Raises Exception when either expectation is violated.
    """
    command = "kubectl get pod `kubectl get pod | grep {name} | awk '{{print $1}}'` -o yaml".format(name=pod_name)
    result = ex.execute_unix_command(command)
    result_dict = yaml.safe_load(result)
    resources = result_dict['spec']['containers'][0]['resources']
    if resources['requests']['cpu'] != '0':
        raise Exception("CPU request should be 0! CPU request: " + resources['requests']['cpu'])
    # CPU limit is expressed in millicores (e.g. "500m"); drop the trailing
    # "m" before comparing with the shared_caas resource count.
    if resources['limits']['cpu'][:-1] != resources['limits']['nokia.k8s.io/shared_caas']:
        # Bug fix: the original message printed the (already validated) CPU
        # request instead of the two limit values that actually mismatched.
        raise Exception("CPU limit should be equal to nokia.k8s.io/shared_caas! "
                        "CPU limit: {0}, shared_caas: {1}"
                        .format(resources['limits']['cpu'],
                                resources['limits']['nokia.k8s.io/shared_caas']))