robot tcs, test charts, robot container added
[ta/cloudtaf.git] / testcases / cpu_pooling / tc_001_cpu_pool_validation_tests.py
1 import sys
2 import os
3 import re
4 from robot.libraries.BuiltIn import BuiltIn
5 from robot.libraries.String import String
6 from robot.api import logger
7 from decorators_for_robot_functionalities import *
8 from time import sleep
9 from test_constants import *
10
11 sys.path.append(os.path.join(os.path.dirname(__file__), '..', '../libraries/common'))
12 import common_utils  # noqa
13
14
# Shared 'execute_command' Robot library instance used to run shell/kubectl commands.
ex = BuiltIn().get_library_instance('execute_command')
# Node name -> CPU pool layout (exclusive/shared/default); populated in Setup().
cpupools = {}
17
18
def tc_001_cpu_pool_validation_tests():
    """Test-case entry point: run Setup, then execute every step keyword in order."""
    step_keywords = [
        'step1_check_default_pool_cpu_node_capacity',
        'step2_exclusive_and_shared',
        'step3_annotation_without_requests',
        'step4_annotation_without_container',
        'step5_annotation_without_cpus',
        'step6_request_for_default_pool',
        'step7_pod_use_default_pool_guaranteed',
        'step8_pod_use_default_pool_burstable',
        'step9_1_exclusive_1_shared',
        'step10_cpu_allowed_list_set_after_test_pod_deployed',
    ]
    BuiltIn().run_keyword("tc_001_cpu_pool_validation_tests.Setup")
    common_utils.keyword_runner(step_keywords)
34
35
@pabot_lock("flannel_ip")
def Setup():
    """Resolve the deployment node and discover its CPU pools; store both globally."""
    global cpupools, nodename
    nodename = common_utils.decide_nodename()
    cpupools = common_utils.get_cpupools()
    logger.info("CPU pools: {0}".format(cpupools))
    logger.info("Default nodename to deploy: {0}".format(nodename))
43
44
45 # set lock to not run with HPA_checks tests
46 @pabot_lock("health_check_1")
47 @pabot_lock("flannel_ip")
48 def step1_check_default_pool_cpu_node_capacity():
49     node_cpu_capacity = get_node_cpu_capacity(nodename)
50     cpu_request = "{0}m".format(node_cpu_capacity)
51     try:
52         common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
53                                   values="registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
54                                   .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
55         common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
56                                                     expected_result="1",
57                                                     filter=r'(Running)\s*[0]',
58                                                     timeout=90)
59         logger.info("Default pool allocation successfull with maximum allocatable cpus!")
60         common_utils.helm_delete("cpu-pooling")
61         common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
62                                              tester_function=common_utils.test_kubernetes_object_not_available,
63                                              timeout=60)
64
65         cpu_request = "{0}m".format(node_cpu_capacity + 10)
66         common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
67                                   values="registry_url={reg_url},nodename={node_name},cpu_request={cpu},cpu_limit={cpu}"
68                                   .format(reg_url=reg, node_name=nodename, cpu=cpu_request))
69         common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
70                                                     expected_result="1",
71                                                     filter=r'(Pending)\s*[0]',
72                                                     timeout=90,
73                                                     delay=3)
74         logger.info("Default pool allocation failed with more cpu than allocatable as expected!")
75     finally:
76         common_utils.helm_delete("cpu-pooling")
77         common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
78                                              tester_function=common_utils.test_kubernetes_object_not_available,
79                                              timeout=60)
80
81
@pabot_lock("health_check_1")
@pabot_lock("flannel_ip")
def step2_exclusive_and_shared():
    """Deploy a pod requesting exclusive + shared CPUs and check its cpuset
    lies inside the union of those two pools."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-mix2", release_name="cpu-pooling",
                                  values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod6,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)
        pod_cpuset = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod6['obj_name'])
        combined_pool = cpupools[nodename]['exclusive_caas'] + cpupools[nodename]['shared_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(pod_cpuset, combined_pool):
            raise Exception('{pod} not allocate CPUs from {req_pool} pool!'.format(pod=cpu_pooling_pod6['obj_name'],
                                                                                   req_pool=combined_pool))
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod6,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
103
104
@pabot_lock("health_check_1")
@pabot_lock("flannel_ip")
def step3_annotation_without_requests():
    """A pooling annotation without pool requests must be rejected: the
    replicaset description has to carry the controller's error message."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-annotation1", release_name="cpu-pooling",
                                  values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod9,
                                             tester_function=common_utils.test_kubernetes_object_available,
                                             timeout=30,
                                             delay=3)

        describe_output = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod9['obj_name']))
        error = 'Container cpu-pooling has no pool requests in pod spec'
        if error in describe_output:
            logger.info(error)
        else:
            raise Exception('Replicaset description does not contain expected error! -' + describe_output)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod9,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
129
130
@pabot_lock("health_check_1")
@pabot_lock("flannel_ip")
def step4_annotation_without_container():
    """A pooling annotation missing the 'container' field must be rejected
    with the corresponding error in the replicaset description."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-annotation2", release_name="cpu-pooling",
                                  values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod10,
                                             tester_function=common_utils.test_kubernetes_object_available,
                                             timeout=30,
                                             delay=3)

        describe_output = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod10['obj_name']))
        error = "'container' is mandatory in annotation"
        if error in describe_output:
            logger.info(error)
        else:
            raise Exception('Replicaset description does not contain expected error! -' + describe_output)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod10,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
155
156
@pabot_lock("health_check_1")
@pabot_lock("flannel_ip")
def step5_annotation_without_cpus():
    """A pooling annotation missing the 'cpus' field must be rejected
    with the corresponding error in the replicaset description."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-annotation3", release_name="cpu-pooling",
                                  values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod11,
                                             tester_function=common_utils.test_kubernetes_object_available,
                                             timeout=30,
                                             delay=3)

        describe_output = ex.execute_unix_command('kubectl describe replicasets {0}'.format(cpu_pooling_pod11['obj_name']))
        error = "'cpus' field is mandatory in annotation"
        if error in describe_output:
            logger.info(error)
        else:
            raise Exception('Replicaset description does not contain expected error! -' + describe_output)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod11,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
181
182
@pabot_lock("health_check_1")
@pabot_lock("flannel_ip")
def step6_request_for_default_pool():
    """Requesting the default pool explicitly must leave the pod Pending with
    an 'Insufficient nokia.k8s.io/default' scheduling error."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-default2", release_name="cpu-pooling",
                                  values="registry_url={reg_url}".format(reg_url=reg))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod8,
                                                    expected_result="1",
                                                    filter=r'(Pending)\s*[0]',
                                                    timeout=30,
                                                    delay=3)
        error = "Insufficient nokia.k8s.io/default"
        result = ex.execute_unix_command('kubectl describe pod {podname}'.format(podname=cpu_pooling_pod8['obj_name']))

        if error not in result:
            # Message fix: the inspected object is the pod, not a replicaset.
            raise Exception('Pod description does not contain expected error! -' + result)
        else:
            logger.info(error)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod8,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
206
207
@pabot_lock("flannel_ip")
def step7_pod_use_default_pool_guaranteed():
    """Pod with equal CPU request/limit must run on the default pool
    and be classified as Guaranteed QoS."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
                                                                                              node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)

        pod_cpuset = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod7['obj_name'])
        if not common_utils.allowed_cpus_is_in_cpu_pool(pod_cpuset, cpupools[nodename]['default']):
            raise Exception('{pod} not allocate CPU from default pool!'.format(pod=cpu_pooling_pod7['obj_name']))
        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Guaranteed")
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
229
230
@pabot_lock("flannel_ip")
def step8_pod_use_default_pool_burstable():
    """Pod with CPU request below its limit must run on the default pool
    and be classified as Burstable QoS."""
    memory_request = "500Mi"
    cpu_request = "250m"
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-default1", release_name="cpu-pooling",
                                  values="registry_url={reg_url},nodename={node_name},mem_request={mem},"
                                         "cpu_request={cpu}".format(reg_url=reg, node_name=nodename, mem=memory_request,
                                                                    cpu=cpu_request))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod7,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)

        pod_cpuset = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod7['obj_name'])
        if not common_utils.allowed_cpus_is_in_cpu_pool(pod_cpuset, cpupools[nodename]['default']):
            raise Exception('{pod} not allocate CPU from default pool!'.format(pod=cpu_pooling_pod7['obj_name']))
        check_qos_of_pod(cpu_pooling_pod7['obj_name'], "Burstable")
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod7,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=60)
255
256
@pabot_lock("flannel_ip")
def step9_1_exclusive_1_shared():
    """Pod requesting one exclusive and one shared CPU must reach Running."""
    try:
        common_utils.helm_install(chart_name="default/cpu-pooling-mix1", release_name="cpu-pooling",
                                  values="registry_url={reg_url},nodename={node_name}".format(reg_url=reg,
                                                                                              node_name=nodename))
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod5,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod5,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
272
273
@pabot_lock("cpu_pooling")
@pabot_lock("flannel_ip")
def step10_cpu_allowed_list_set_after_test_pod_deployed():
    """Verify the cpu-setter daemonset fixes up a pod's cpuset retroactively.

    Deletes the cpu-setter daemonset, deploys an exclusive-pool pod (whose
    Cpus_allowed list is then unmanaged), redeploys the setter from the saved
    manifest, and checks the pod's cpuset changed and now falls inside the
    exclusive pool. The finally-block restores the setter if it was left
    deleted, force-deleting stale setter pods first if any remain.
    """
    # Tracks whether the daemonset is currently deleted, so the finally
    # block knows whether it must restore it.
    cpu_setter_deleted = False
    try:
        # Remember how many setter pods exist so we can wait for the same
        # count after restoring the daemonset.
        cpu_pooling_setter["obj_count"] = ex.execute_unix_command("kubectl get pod --all-namespaces | "
                                                                  "grep setter | wc -l")
        ex.execute_unix_command("kubectl get ds -n kube-system cpu-setter -o yaml")
        # Save the daemonset manifest so it can be re-created later.
        ex.execute_unix_command("kubectl get ds -n kube-system cpu-setter -o yaml > setter.yaml")
        ex.execute_unix_command("kubectl delete ds -n kube-system cpu-setter")

        cpu_setter_deleted = True

        # Wait until all setter pods are gone before deploying the test pod.
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_setter,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)

        common_utils.helm_install(chart_name="default/cpu-pooling-exclusive1", release_name="cpu-pooling",
                                  values="registry_url=" + reg + ",nodename=" + nodename)
        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_pod1,
                                                    expected_result="1",
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)

        # Snapshot the pod's cpuset while no setter is running.
        allowed_cpus_for_pod_before = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod1['obj_name'])

        # Restore the setter daemonset from the saved manifest.
        ex.execute_unix_command("kubectl create -f setter.yaml")

        common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_setter,
                                                    expected_result=cpu_pooling_setter["obj_count"],
                                                    filter=r'(Running)\s*[0]',
                                                    timeout=90)
        cpu_setter_deleted = False
        # The setter should have rewritten the already-running pod's cpuset.
        allowed_cpus_for_pod_after = common_utils.get_cpu_allowed_list_from_pod(cpu_pooling_pod1['obj_name'])
        exclusive_cpus = cpupools[nodename]['exclusive_caas']
        if not common_utils.allowed_cpus_is_in_cpu_pool(allowed_cpus_for_pod_after, exclusive_cpus):
            raise Exception('{pod} not allocate CPU from exclusive pool!'.format(pod=cpu_pooling_pod1['obj_name']))
        if set(allowed_cpus_for_pod_before) == set(allowed_cpus_for_pod_after):
            raise Exception('Allocated CPUs before setter deployed is equal with CPU set after deploy!')
    finally:
        common_utils.helm_delete("cpu-pooling")
        common_utils.check_kubernetes_object(kube_object=cpu_pooling_pod1,
                                             tester_function=common_utils.test_kubernetes_object_not_available,
                                             timeout=90)
        setter_count = ex.execute_unix_command("kubectl get pod --all-namespaces | grep setter | wc -l")
        if cpu_setter_deleted:
            # Daemonset still deleted: clear any leftover setter pods first,
            # then re-create it and wait for the original pod count.
            if setter_count != "0":
                search_cmd = "kubectl get pod -n kube-system |grep setter | awk '{print $1}'"
                del_cmd = "kubectl -n kube-system delete pod --grace-period=0 --force --wait=false"

                ex.execute_unix_command("for i in `{search}`; do {delete} $i; done".format(search=search_cmd,
                                                                                           delete=del_cmd))
                common_utils.check_kubernetes_object(kube_object=cpu_pooling_setter,
                                                     tester_function=common_utils.test_kubernetes_object_not_available,
                                                     timeout=90)
            ex.execute_unix_command("kubectl create -f setter.yaml")

            common_utils.test_kubernetes_object_quality(kube_object=cpu_pooling_setter,
                                                        expected_result=cpu_pooling_setter["obj_count"],
                                                        filter=r'(Running)\s*[0]',
                                                        timeout=90)
335
336
@robot_log
def check_qos_of_pod(podname, qos_type):
    """Raise if the pod's kubectl-reported 'QoS Class' does not contain qos_type."""
    describe = "kubectl describe pod " \
               "`kubectl get pod | grep {0} | awk '{{print $1}}'`".format(podname)
    result = ex.execute_unix_command(describe + " | grep 'QoS Class:'")
    if qos_type in result:
        return
    raise Exception("{pod} QoS should be {qos}, instead of {result}!".format(pod=podname, qos=qos_type,
                                                                             result=result))
345
346
@robot_log
def get_node_cpu_capacity(node_name):
    """Return the node's free CPU in millicores: allocatable CPU minus the
    millicores already requested on the node (parsed from kubectl describe).

    Raises Exception when either value cannot be parsed from the output.
    """
    command = "kubectl describe node `kubectl get no -L=nodename | grep {nodename} | awk '{{print $1}}'`"\
        .format(nodename=node_name)
    result = ex.execute_unix_command(command)
    # re.DOTALL with greedy '.*' matches the same span as the original
    # '(.|\n)*' alternation, without its backtracking blow-up risk.
    allocatable = re.search(r'Allocatable:.*cpu:\s+(\d+)', result, re.DOTALL)
    if allocatable:
        # Whole cores -> millicores.
        max_cap = int(allocatable.group(1)) * 1000
        # First millicore figure in the output: CPU already requested by pods.
        requested = re.search(r'cpu\s+(\d+)m', result)
        if requested:
            return max_cap - int(requested.group(1))
    raise Exception('Failed getting node CPU capacity!')