#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

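# Makes run_test_cases importable when this test is run directly.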
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)

import run_test_cases

OUTPUT = os.path.join(ROOT_DIR, 'tests', 'run_test_cases', 'output.py')


def to_native_eol(string):
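  """Returns the string with native line endings (CRLF on Windows)."""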
  if string is None:
    return string
  if sys.platform == 'win32':
    return string.replace('\n', '\r\n')
  return string


class ListTestCasesTest(unittest.TestCase):
  def test_shards(self):
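    # Each entry is (expected, range_length, index, shards).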
    test_cases = (
        (range(10), 10, 0, 1),

        ([0, 1], 5, 0, 3),
        ([2, 3], 5, 1, 3),
        ([4   ], 5, 2, 3),

        ([0], 5, 0, 7),
        ([1], 5, 1, 7),
        ([2], 5, 2, 7),
        ([3], 5, 3, 7),
        ([4], 5, 4, 7),
        ([ ], 5, 5, 7),
        ([ ], 5, 6, 7),

        ([0, 1], 4, 0, 2),
        ([2, 3], 4, 1, 2),
    )
    for expected, range_length, index, shards in test_cases:
      result = run_test_cases.filter_shards(range(range_length), index, shards)
      self.assertEqual(
          expected, result, (result, expected, range_length, index, shards))


def process_output(content, test_cases):
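  # Feeds the content line by line and materializes the resulting generator.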
  return list(
      run_test_cases.process_output(content.splitlines(True), test_cases))


class RunTestCasesSlow(unittest.TestCase):
  def test_call_with_timeout(self):
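    # Exit code of a child killed on timeout: 1 on Windows, -9 (SIGKILL)
    # elsewhere.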
    timedout = 1 if sys.platform == 'win32' else -9
    # Format is:
    # ( (cmd, stderr_pipe, timeout), (stdout, stderr, returncode) ), ...
    test_data = [
      # 0 means no timeout, like None.
      (
        (['out_sleeping', '0.001', 'out_slept', 'err_print'], None, 0),
        ('Sleeping.\nSlept.\n', None, 0),
      ),
      (
        (['err_print'], subprocess.STDOUT, 0),
        ('printing', None, 0),
      ),
      (
        (['err_print'], subprocess.PIPE, 0),
        ('', 'printing', 0),
      ),

      # On a loaded system, this can be tight.
      (
        (['out_sleeping', 'out_flush', '100', 'out_slept'], None, 0.5),
        ('Sleeping.\n', '', timedout),
      ),
      (
        (
          # Note that err_flush is necessary on Windows but not on the other
          # OSes. This means the likelihood of missing stderr output from a
          # killed child process on Windows is much higher than on other OSes.
          [
            'out_sleeping', 'out_flush', 'err_print', 'err_flush', '100',
            'out_slept',
          ],
          subprocess.PIPE,
          0.5),
        ('Sleeping.\n', 'printing', timedout),
      ),

      (
        (['out_sleeping', '0.001', 'out_slept'], None, 100),
        ('Sleeping.\nSlept.\n', '', 0),
      ),
    ]
    for i, (data, expected) in enumerate(test_data):
      stdout, stderr, code, duration = run_test_cases.call_with_timeout(
          [sys.executable, OUTPUT] + data[0],
          stderr=data[1],
          timeout=data[2])
      self.assertTrue(duration > 0.0001, (data, duration))
      self.assertEqual(
          (i, stdout, stderr, code),
          (i,
           to_native_eol(expected[0]),
           to_native_eol(expected[1]),
           expected[2]))

      # Try again with universal_newlines=True.
      stdout, stderr, code, duration = run_test_cases.call_with_timeout(
          [sys.executable, OUTPUT] + data[0],
          stderr=data[1],
          timeout=data[2],
          universal_newlines=True)
      self.assertTrue(duration > 0.0001, (data, duration))
      self.assertEqual(
          (i, stdout, stderr, code),
          (i,) + expected)

  def test_recv_any(self):
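    # Each combination describes the pipe wiring and the output expected on
    # each pipe.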
    combinations = [
      {
        'cmd': ['out_print', 'err_print'],
        'stdout': None,
        'stderr': None,
        'expected': {},
      },
      {
        'cmd': ['out_print', 'err_print'],
        'stdout': None,
        'stderr': subprocess.STDOUT,
        'expected': {},
      },

      {
        'cmd': ['out_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'expected': {'stdout': 'printing'},
      },
      {
        'cmd': ['out_print'],
        'stdout': subprocess.PIPE,
        'stderr': None,
        'expected': {'stdout': 'printing'},
      },
      {
        'cmd': ['out_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.STDOUT,
        'expected': {'stdout': 'printing'},
      },

      {
        'cmd': ['err_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'expected': {'stderr': 'printing'},
      },
      {
        'cmd': ['err_print'],
        'stdout': None,
        'stderr': subprocess.PIPE,
        'expected': {'stderr': 'printing'},
      },
      {
        'cmd': ['err_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.STDOUT,
        'expected': {'stdout': 'printing'},
      },

      {
        'cmd': ['out_print', 'err_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.PIPE,
        'expected': {'stderr': 'printing', 'stdout': 'printing'},
      },
      {
        'cmd': ['out_print', 'err_print'],
        'stdout': subprocess.PIPE,
        'stderr': subprocess.STDOUT,
        'expected': {'stdout': 'printingprinting'},
      },
    ]
    for i, data in enumerate(combinations):
      cmd = [sys.executable, OUTPUT] + data['cmd']
      p = run_test_cases.Popen(
          cmd, stdout=data['stdout'], stderr=data['stderr'])
      actual = {}
      while p.poll() is None:
        pipe, d = p.recv_any()
        if pipe is not None:
          actual.setdefault(pipe, '')
          actual[pipe] += d
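      # Drain anything left in the pipes once the process has exited.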
      while True:
        pipe, d = p.recv_any()
        if pipe is None:
          break
        actual.setdefault(pipe, '')
        actual[pipe] += d
      self.assertEqual(data['expected'], actual, (i, data['expected'], actual))
      self.assertEqual((None, None), p.recv_any())
      self.assertEqual(0, p.returncode)

  @staticmethod
  def _get_output_sleep_proc(flush, env, duration):
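    """Returns a process that prints 'A', sleeps |duration| and prints 'B'."""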
    command = [
      'import sys,time',
      'print(\'A\')',
    ]
    if flush:
      # Sadly, this doesn't work otherwise in some combinations.
      command.append('sys.stdout.flush()')
    command.extend((
      'time.sleep(%s)' % duration,
      'print(\'B\')',
    ))
    return run_test_cases.Popen(
        [
          sys.executable,
          '-c',
          ';'.join(command),
        ],
        stdout=subprocess.PIPE,
        universal_newlines=True,
        env=env)

  def test_yield_any_None(self):
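    # Timing sensitive; when a loaded machine makes an assertion fail, retry
    # with a longer sleep.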
    for duration in (0.05, 0.1, 0.5, 2):
      try:
        proc = self._get_output_sleep_proc(True, {}, duration)
        expected = [
          'A\n',
          'B\n',
        ]
        for p, data in proc.yield_any(timeout=None):
          self.assertEqual('stdout', p)
          self.assertEqual(expected.pop(0), data)
        self.assertEqual(0, proc.returncode)
        self.assertEqual([], expected)
        break
      except AssertionError:
        if duration != 2:
          print('Sleeping rocks. Trying more slowly.')
          continue
        raise

  def test_yield_any_0(self):
    for duration in (0.05, 0.1, 0.5, 2):
      try:
        proc = self._get_output_sleep_proc(True, {}, duration)
        expected = [
          'A\n',
          'B\n',
        ]
        got_none = False
        for p, data in proc.yield_any(timeout=0):
          if not p:
            got_none = True
            continue
          self.assertEqual('stdout', p)
          self.assertEqual(expected.pop(0), data)
        self.assertEqual(0, proc.returncode)
        self.assertEqual([], expected)
        self.assertEqual(True, got_none)
        break
      except AssertionError:
        if duration != 2:
          print('Sleeping rocks. Trying more slowly.')
          continue
        raise

  def test_recv_any_None(self):
    values = (
      (True, ['A\n', 'B\n'], {}),
      (False, ['A\nB\n'], {}),
      (False, ['A\n', 'B\n'], {'PYTHONUNBUFFERED': 'x'}),
    )
    for flush, exp, env in values:
      for duration in (0.05, 0.1, 0.5, 2):
        expected = exp[:]
        try:
          proc = self._get_output_sleep_proc(flush, env, duration)
          while True:
            p, data = proc.recv_any(timeout=None)
            if not p:
              break
            self.assertEqual('stdout', p)
            if not expected:
              self.fail(data)
            e = expected.pop(0)
            if env:
              # Buffering truly happens at the character level, so items could
              # be received individually. This is usually seen only under high
              # load; try compiling at the same time to reproduce it.
              if len(data) < len(e):
                expected.insert(0, e[len(data):])
                e = e[:len(data)]
            self.assertEqual(e, data)
          # Contrary to yield_any() or recv_any(0), wait() needs to be used
          # here.
          proc.wait()
          self.assertEqual([], expected)
          self.assertEqual(0, proc.returncode)
        except AssertionError:
          if duration != 2:
            print('Sleeping rocks. Trying more slowly.')
            continue
          raise

  def test_recv_any_0(self):
    values = (
      (True, ['A\n', 'B\n'], {}),
      (False, ['A\nB\n'], {}),
      (False, ['A\n', 'B\n'], {'PYTHONUNBUFFERED': 'x'}),
    )
    for i, (flush, exp, env) in enumerate(values):
      for duration in (0.1, 0.5, 2):
        expected = exp[:]
        try:
          proc = self._get_output_sleep_proc(flush, env, duration)
          got_none = False
          while True:
            p, data = proc.recv_any(timeout=0)
            if not p:
              if proc.poll() is None:
                got_none = True
                continue
              break
            self.assertEqual('stdout', p)
            if not expected:
              self.fail(data)
            e = expected.pop(0)
            if sys.platform == 'win32':
              # Buffering truly happens at the character level on Windows, so
              # items could be received individually.
              if len(data) < len(e):
                expected.insert(0, e[len(data):])
                e = e[:len(data)]
            self.assertEqual(e, data)

          self.assertEqual(0, proc.returncode)
          self.assertEqual([], expected)
          self.assertEqual(True, got_none)
        except Exception:
          if duration != 2:
            print('Sleeping rocks. Trying more slowly.')
            continue
          print >> sys.stderr, 'Failure at index %d' % i
          raise

  def test_gtest_filter(self):
    old = run_test_cases.run_test_cases
    exe = os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py')
    def expect(
        executable, cwd, test_cases, jobs, timeout, clusters, retries,
        run_all, max_failures, no_cr, gtest_output, result_file, verbose):
      self.assertEqual(
          run_test_cases.tools.fix_python_path([exe]), executable)
      self.assertEqual(os.getcwd(), cwd)
      # They are in reverse order due to test shuffling.
      self.assertEqual(['Foo.Bar1', 'Foo.Bar/3'], test_cases)
      self.assertEqual(run_test_cases.threading_utils.num_processors(), jobs)
      self.assertEqual(75, timeout)
      self.assertEqual(None, clusters)
      self.assertEqual(2, retries)
      self.assertEqual(None, run_all)
      self.assertEqual(None, no_cr)
      self.assertEqual('', gtest_output)
      self.assertEqual(None, max_failures)
      self.assertEqual(exe + '.run_test_cases', result_file)
      self.assertFalse(verbose)
      return 89

    try:
      run_test_cases.run_test_cases = expect
      result = run_test_cases.main([exe, '--gtest_filter=Foo.Bar*-*.Bar2'])
      self.assertEqual(89, result)
    finally:
      run_test_cases.run_test_cases = old

  def test_ResetableTimeout(self):
    self.assertTrue(run_test_cases.ResetableTimeout(0))
    a = run_test_cases.ResetableTimeout(1)
    self.assertEqual(1., float(a))
    count = 0
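    # Keep resetting until the effective timeout grows past its base value.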
    for count in xrange(1000000):
      value = float(a)
      self.assertTrue(value >= 1., value)
      if value != 1.:
        break
      a.reset()
    self.assertTrue(value > 1., value)
    # Assume no 10s jank.
    self.assertTrue(value < 10., value)
    self.assertTrue(count < 1000000, count)
    self.assertTrue(count > 0, count)


class RunTestCasesFast(unittest.TestCase):
  def test_convert_to_lines(self):
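    # Each entry is (input chunks, expected list of lines).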
    data = [
      (
        ('blah'),
        ['blah'],
      ),
      (
        ('blah\n'),
        ['blah\n'],
      ),
      (
        ('blah', '\n'),
        ['blah\n'],
      ),
      (
        ('\n'),
        ['\n'],
      ),
      (
        ('blah blah\nboo'),
        ['blah blah\n', 'boo'],
      ),
      (
        ('b', 'lah blah\nboo'),
        ['blah blah\n', 'boo'],
      ),
    ]
    for generator, expected in data:
      self.assertEqual(
          expected,
          list(run_test_cases.convert_to_lines(generator)))

  def testRunSome(self):
    tests = [
      # Try with named arguments. Accepts 3*1 failures.
      (
        run_test_cases.RunSome(
            expected_count=10,
            retries=2,
            min_failures=1,
            max_failure_ratio=0.001,
            max_failures=None),
        [False] * 4),
      # Same without named arguments.
      (run_test_cases.RunSome( 10, 2, 1, 0.001, None), [False] * 4),

      (run_test_cases.RunSome( 10, 0, 1, 0.001, None), [False] * 2),
      (run_test_cases.RunSome( 10, 0, 1, 0.010, None), [False] * 2),

      # For low expected_count values, retries * min_failures is the lower
      # bound of accepted failures.
      (run_test_cases.RunSome( 10, 2, 1, 0.010, None), [False] * 4),
      (run_test_cases.RunSome( 10, 2, 1, 0.020, None), [False] * 4),
      (run_test_cases.RunSome( 10, 2, 1, 0.050, None), [False] * 4),
      (run_test_cases.RunSome( 10, 2, 1, 0.100, None), [False] * 4),
      (run_test_cases.RunSome( 10, 2, 1, 0.110, None), [False] * 4),

      # Allows expected_count + retries failures at maximum.
      (run_test_cases.RunSome( 10, 2, 1, 0.200, None), [False] * 6),
      (run_test_cases.RunSome( 10, 2, 1, 0.999, None), [False] * 30),

      # The asymptote nears max_failure_ratio for large expected_count
      # values.
      (run_test_cases.RunSome(1000, 2, 1, 0.050, None), [False] * 150),
    ]
    for index, (decider, rounds) in enumerate(tests):
      for index2, r in enumerate(rounds):
        self.assertFalse(decider.should_stop(), (index, index2, str(decider)))
        decider.got_result(r)
      self.assertTrue(decider.should_stop(), (index, str(decider)))

  def testStatsInfinite(self):
    decider = run_test_cases.RunAll()
    for _ in xrange(200):
      self.assertFalse(decider.should_stop())
      decider.got_result(False)

  def test_process_output_garbage(self):
    data = 'garbage\n'
    expected = [
      {
        'duration': None,
        'output': None,
        'returncode': None,
        'test_case': 'Test.1',
      },
      {
        'duration': None,
        'output': None,
        'returncode': None,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_crash_cr(self):
    # CR-only line endings are supported. Let's assume a crash.
    data = '[ RUN      ] Test.1\r[ RUN      ] Test.2\r'
    expected = [
      {
        'crashed': True,
        'duration': 0,
        'output': '[ RUN      ] Test.1\r',
        'returncode': 1,
        'test_case': 'Test.1',
      },
      {
        'crashed': True,
        'duration': 0.,
        'output': '[ RUN      ] Test.2\r',
        'returncode': 1,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_crashes(self):
    data = '[ RUN      ] Test.1\n[ RUN      ] Test.2\n'
    expected = [
      {
        'crashed': True,
        'duration': 0,
        'output': '[ RUN      ] Test.1\n',
        'returncode': 1,
        'test_case': 'Test.1',
      },
      {
        'crashed': True,
        'duration': 0.,
        'output': '[ RUN      ] Test.2\n',
        'returncode': 1,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_ok(self):
    data = (
        '[ RUN      ] Test.1\n'
        '[       OK ] Test.1 (1000 ms)\n'
        '[ RUN      ] Test.2\n'
        '[       OK ] Test.2 (2000 ms)\n')
    expected = [
      {
        'duration': 1.,
        'output': '[ RUN      ] Test.1\n[       OK ] Test.1 (1000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.1',
      },
      {
        'duration': 2.,
        'output': '[ RUN      ] Test.2\n[       OK ] Test.2 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_no_time(self):
    data = (
        '[ RUN      ] Test.1\n'
        '[       OK ] Test.1\n')
    expected = [
      {
        'duration': 0.,
        'output': '[ RUN      ] Test.1\n[       OK ] Test.1\n',
        'returncode': 0,
        'test_case': 'Test.1',
      },
    ]
    actual = process_output(data, ['Test.1'])
    self.assertEqual(expected, actual)

  def test_process_output_fail_1(self):
    data = (
        '[ RUN      ] Test.1\n'
        '[  FAILED  ] Test.1 (1000 ms)\n'
        '[ RUN      ] Test.2\n'
        '[       OK ] Test.2 (2000 ms)\n')
    expected = [
      {
        'duration': 1.,
        'output': '[ RUN      ] Test.1\n[  FAILED  ] Test.1 (1000 ms)\n',
        'returncode': 1,
        'test_case': 'Test.1',
      },
      {
        'duration': 2.,
        'output': '[ RUN      ] Test.2\n[       OK ] Test.2 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_crash_ok(self):
    data = (
        '[ RUN      ] Test.1\n'
        'blah blah crash.\n'
        '[ RUN      ] Test.2\n'
        '[       OK ] Test.2 (2000 ms)\n')
    expected = [
      {
        'crashed': True,
        'duration': 0.,
        'output': '[ RUN      ] Test.1\nblah blah crash.\n',
        'returncode': 1,
        'test_case': 'Test.1',
      },
      {
        'duration': 2.,
        'output': '[ RUN      ] Test.2\n[       OK ] Test.2 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_crash_garbage_ok(self):
    data = (
        '[ RUN      ] Test.1\n'
        'blah blah crash[ RUN      ] Test.2\n'
        '[       OK ] Test.2 (2000 ms)\n')
    expected = [
      {
        'crashed': True,
        'duration': 0.,
        'output': '[ RUN      ] Test.1\nblah blah crash',
        'returncode': 1,
        'test_case': 'Test.1',
      },
      {
        'duration': 2.,
        'output': '[ RUN      ] Test.2\n[       OK ] Test.2 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.2',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_missing(self):
    data = (
        '[ RUN      ] Test.2\n'
        '[       OK ] Test.2 (2000 ms)\n')
    expected = [
      {
        'duration': 2.,
        'output': '[ RUN      ] Test.2\n[       OK ] Test.2 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.2',
      },
      {
        'duration': None,
        'output': None,
        'returncode': None,
        'test_case': 'Test.1',
      },
    ]
    actual = process_output(data, ['Test.1', 'Test.2'])
    self.assertEqual(expected, actual)

  def test_process_output_no_lr(self):
    data = (
        '[ RUN      ] Test.1\n'
        'junkjunk[       OK ] Test.1 (2000 ms)\n')
    expected = [
      {
        'duration': 2.,
        'output':
            '[ RUN      ] Test.1\njunkjunk[       OK ] Test.1 (2000 ms)\n',
        'returncode': 0,
        'test_case': 'Test.1',
      },
    ]
    actual = process_output(data, ['Test.1'])
    self.assertEqual(expected, actual)

  def test_process_output_fake_ok(self):
    data = (
        '[ RUN      ] TestFix.TestCase\n'
        '[1:2/3:WARNING:extension_apitest.cc(169)] Workaround for 177163, '
        'prematurely stopping test\n'
        '[       OK ] X (1000ms total)\n'
        '[0523/230139:ERROR:test_launcher.cc(365)] Test timeout (45000 ms) '
        'exceeded for ExtensionManagementApiTest.ManagementPolicyProhibited\n')
    expected = [
      {
        'crashed': True,
        'duration': 0,
        'output': data,
        'returncode': 1,
        'test_case': 'TestFix.TestCase',
      },
    ]
    actual = process_output(data, ['TestFix.TestCase'])
    self.assertEqual(expected, actual)

  def test_calc_cluster_default(self):
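    # Each entry is ((num_test_cases, jobs), expected cluster size).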
    expected = [
      ((0, 1), 0),
      ((1, 1), 1),
      ((1, 10), 1),
      ((10, 10), 1),
      ((10, 100), 1),

      # Most VMs have 4 or 8 CPUs; assert that the values are sane.
      ((5, 1), 5),
      ((5, 2), 2),
      ((5, 4), 1),
      ((5, 8), 1),
      ((10, 1), 2),
      ((10, 4), 2),
      ((10, 8), 1),
      ((100, 1), 10),
      ((100, 4), 5),
      ((100, 8), 3),
      ((1000, 1), 10),
      ((1000, 4), 10),
      ((1000, 8), 10),
      ((3000, 1), 10),
      ((3000, 4), 10),
      ((3000, 8), 10),
    ]
    actual = [
      ((num_test_cases, jobs),
       run_test_cases.calc_cluster_default(num_test_cases, jobs))
      for (num_test_cases, jobs), _ in expected
    ]
    self.assertEqual(expected, actual)


class RunTestCasesTmp(unittest.TestCase):
  def setUp(self):
    super(RunTestCasesTmp, self).setUp()
    self.tmpdir = tempfile.mkdtemp(prefix='run_test_cases')

  def tearDown(self):
    shutil.rmtree(self.tmpdir)
    super(RunTestCasesTmp, self).tearDown()

  def test_xml(self):
    # Tests that when files are already present, the index is incremented
    # accordingly.
    open(os.path.join(self.tmpdir, 'a.xml'), 'w').close()
    open(os.path.join(self.tmpdir, 'a_0.xml'), 'w').close()
    open(os.path.join(self.tmpdir, 'a_1.xml'), 'w').close()
    self.assertEqual(
        os.path.join(self.tmpdir, 'a_2.xml'),
        run_test_cases.gen_gtest_output_dir(self.tmpdir, 'xml:a.xml'))

  def test_xml_default(self):
    self.assertEqual(
        os.path.join(self.tmpdir, 'test_detail.xml'),
        run_test_cases.gen_gtest_output_dir(self.tmpdir, 'xml'))

  def test_gen_xml(self):
    data = {
      "duration": 7.895771026611328,
      "expected": 500,
      "fail": [
        "SecurityTest.MemoryAllocationRestrictionsCalloc",
        "SecurityTest.MemoryAllocationRestrictionsNewArray"
      ],
      "flaky": [
        "AlignedMemoryTest.DynamicAllocation",
        "AlignedMemoryTest.ScopedDynamicAllocation",
      ],
      "missing": [
        "AlignedMemoryTest.DynamicAllocation",
        "AlignedMemoryTest.ScopedDynamicAllocation",
      ],
      "success": [
        "AtExitTest.Param",
        "AtExitTest.Task",
      ],
      "test_cases": {
        "AlignedMemoryTest.DynamicAllocation": [
          {
            "duration": 0.044817209243774414,
            "output": "blah blah",
            "returncode": 1,
            "test_case": "AlignedMemoryTest.DynamicAllocation"
          }
        ],
        "AlignedMemoryTest.ScopedDynamicAllocation": [
          {
            "duration": 0.03273797035217285,
            "output": "blah blah",
            "returncode": 0,
            "test_case": "AlignedMemoryTest.ScopedDynamicAllocation"
          }
        ],
        "Foo.Bar": [
          {
            "duration": None,
            "output": None,
            "returncode": None,
            "test_case": "Foo.Bar",
          },
        ],
      },
    }
    expected = (
        '<?xml version="1.0" ?>\n'
        '<testsuites name="AllTests" tests="500" time="7.895771" '
        'timestamp="1996">\n'
        '<testsuite name="AlignedMemoryTest" tests="2">\n'
        '  <testcase classname="AlignedMemoryTest" name="DynamicAllocation" '
        'status="run" time="0.044817">\n'
        '<failure><![CDATA[blah blah]]></failure></testcase>\n'
        '  <testcase classname="AlignedMemoryTest" '
        'name="ScopedDynamicAllocation" status="run" time="0.032738"/>\n'
        '</testsuite>\n'
        '<testsuite name="Foo" tests="1">\n'
        '  <testcase classname="Foo" name="Bar" '
        'status="run" time="0.000000">\n'
        '<failure><![CDATA[]]></failure></testcase>\n'
        '</testsuite>\n'
        '</testsuites>')
    filepath = os.path.join(self.tmpdir, 'foo.xml')
    run_test_cases.dump_results_as_xml(filepath, data, '1996')
    with open(filepath, 'rb') as f:
      actual = f.read()
    self.assertEqual(expected.splitlines(), actual.splitlines())


if __name__ == '__main__':
  VERBOSE = '-v' in sys.argv
  logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
  unittest.TestCase.maxDiff = 5000
  unittest.main()