Chromium Code Reviews

Side by Side Diff: tests/run_test_cases_test.py

Issue 19917006: Move all googletest related scripts into googletest/ (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/tools/swarm_client
Patch Set: Remove unnecessary pylint warning disable (created 7 years, 5 months ago)
1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5
6 import logging
7 import os
8 import shutil
9 import subprocess
10 import sys
11 import tempfile
12 import unittest
13
14 ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
15 sys.path.insert(0, ROOT_DIR)
16
17 import run_test_cases
18
19 OUTPUT = os.path.join(ROOT_DIR, 'tests', 'run_test_cases', 'output.py')
20
21
22 def to_native_eol(string):
23 if string is None:
24 return string
25 if sys.platform == 'win32':
26 return string.replace('\n', '\r\n')
27 return string
28
29
30 class ListTestCasesTest(unittest.TestCase):
31 def test_shards(self):
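# Each entry is (expected, range_length, index, shards): filter_shards()
# should return the slice of range(range_length) assigned to that shard.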
32 test_cases = (
33 (range(10), 10, 0, 1),
34
35 ([0, 1], 5, 0, 3),
36 ([2, 3], 5, 1, 3),
37 ([4 ], 5, 2, 3),
38
39 ([0], 5, 0, 7),
40 ([1], 5, 1, 7),
41 ([2], 5, 2, 7),
42 ([3], 5, 3, 7),
43 ([4], 5, 4, 7),
44 ([ ], 5, 5, 7),
45 ([ ], 5, 6, 7),
46
47 ([0, 1], 4, 0, 2),
48 ([2, 3], 4, 1, 2),
49 )
50 for expected, range_length, index, shards in test_cases:
51 result = run_test_cases.filter_shards(range(range_length), index, shards)
52 self.assertEqual(
53 expected, result, (result, expected, range_length, index, shards))
54
55
56 def process_output(content, test_cases):
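# Test helper: feeds the content, split into lines with their endings kept,
# to run_test_cases.process_output() and returns the results as a list.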
57 return list(
58 run_test_cases.process_output(content.splitlines(True), test_cases))
59
60
61 class RunTestCasesSlow(unittest.TestCase):
62 def test_call_with_timeout(self):
63 timedout = 1 if sys.platform == 'win32' else -9
64 # Format is:
65 # ( (cmd, stderr_pipe, timeout), (stdout, stderr, returncode) ), ...
66 test_data = [
67 # 0 means no timeout, like None.
68 (
69 (['out_sleeping', '0.001', 'out_slept', 'err_print'], None, 0),
70 ('Sleeping.\nSlept.\n', None, 0),
71 ),
72 (
73 (['err_print'], subprocess.STDOUT, 0),
74 ('printing', None, 0),
75 ),
76 (
77 (['err_print'], subprocess.PIPE, 0),
78 ('', 'printing', 0),
79 ),
80
81 # On a loaded system, this can be tight.
82 (
83 (['out_sleeping', 'out_flush', '100', 'out_slept'], None, 0.5),
84 ('Sleeping.\n', '', timedout),
85 ),
86 (
87 (
88 # Note that err_flush is necessary on Windows but not on the other
89 # OSes. This means the likelihood of missing stderr output from a
90 # killed child process on Windows is much higher than on other OSes.
91 [
92 'out_sleeping', 'out_flush', 'err_print', 'err_flush', '100',
93 'out_slept',
94 ],
95 subprocess.PIPE,
96 0.5),
97 ('Sleeping.\n', 'printing', timedout),
98 ),
99
100 (
101 (['out_sleeping', '0.001', 'out_slept'], None, 100),
102 ('Sleeping.\nSlept.\n', '', 0),
103 ),
104 ]
105 for i, (data, expected) in enumerate(test_data):
106 stdout, stderr, code, duration = run_test_cases.call_with_timeout(
107 [sys.executable, OUTPUT] + data[0],
108 stderr=data[1],
109 timeout=data[2])
110 self.assertTrue(duration > 0.0001, (data, duration))
111 self.assertEqual(
112 (i, stdout, stderr, code),
113 (i,
114 to_native_eol(expected[0]),
115 to_native_eol(expected[1]),
116 expected[2]))
117
118 # Try again with universal_newlines=True.
119 stdout, stderr, code, duration = run_test_cases.call_with_timeout(
120 [sys.executable, OUTPUT] + data[0],
121 stderr=data[1],
122 timeout=data[2],
123 universal_newlines=True)
124 self.assertTrue(duration > 0.0001, (data, duration))
125 self.assertEqual(
126 (i, stdout, stderr, code),
127 (i,) + expected)
128
129 def test_recv_any(self):
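# Each combination describes the child command, the stdout/stderr
# redirections, and the output recv_any() is expected to collect.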
130 combinations = [
131 {
132 'cmd': ['out_print', 'err_print'],
133 'stdout': None,
134 'stderr': None,
135 'expected': {},
136 },
137 {
138 'cmd': ['out_print', 'err_print'],
139 'stdout': None,
140 'stderr': subprocess.STDOUT,
141 'expected': {},
142 },
143
144 {
145 'cmd': ['out_print'],
146 'stdout': subprocess.PIPE,
147 'stderr': subprocess.PIPE,
148 'expected': {'stdout': 'printing'},
149 },
150 {
151 'cmd': ['out_print'],
152 'stdout': subprocess.PIPE,
153 'stderr': None,
154 'expected': {'stdout': 'printing'},
155 },
156 {
157 'cmd': ['out_print'],
158 'stdout': subprocess.PIPE,
159 'stderr': subprocess.STDOUT,
160 'expected': {'stdout': 'printing'},
161 },
162
163 {
164 'cmd': ['err_print'],
165 'stdout': subprocess.PIPE,
166 'stderr': subprocess.PIPE,
167 'expected': {'stderr': 'printing'},
168 },
169 {
170 'cmd': ['err_print'],
171 'stdout': None,
172 'stderr': subprocess.PIPE,
173 'expected': {'stderr': 'printing'},
174 },
175 {
176 'cmd': ['err_print'],
177 'stdout': subprocess.PIPE,
178 'stderr': subprocess.STDOUT,
179 'expected': {'stdout': 'printing'},
180 },
181
182 {
183 'cmd': ['out_print', 'err_print'],
184 'stdout': subprocess.PIPE,
185 'stderr': subprocess.PIPE,
186 'expected': {'stderr': 'printing', 'stdout': 'printing'},
187 },
188 {
189 'cmd': ['out_print', 'err_print'],
190 'stdout': subprocess.PIPE,
191 'stderr': subprocess.STDOUT,
192 'expected': {'stdout': 'printingprinting'},
193 },
194 ]
195 for i, data in enumerate(combinations):
196 cmd = [sys.executable, OUTPUT] + data['cmd']
197 p = run_test_cases.Popen(
198 cmd, stdout=data['stdout'], stderr=data['stderr'])
199 actual = {}
200 while p.poll() is None:
201 pipe, d = p.recv_any()
202 if pipe is not None:
203 actual.setdefault(pipe, '')
204 actual[pipe] += d
205 while True:
206 pipe, d = p.recv_any()
207 if pipe is None:
208 break
209 actual.setdefault(pipe, '')
210 actual[pipe] += d
211 self.assertEqual(data['expected'], actual, (i, data['expected'], actual))
212 self.assertEqual((None, None), p.recv_any())
213 self.assertEqual(0, p.returncode)
214
215 @staticmethod
216 def _get_output_sleep_proc(flush, env, duration):
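# Starts a child "python -c" process that prints 'A', optionally flushes
# stdout, sleeps for `duration` seconds, then prints 'B'.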
217 command = [
218 'import sys,time',
219 'print(\'A\')',
220 ]
221 if flush:
222 # Sadly, this doesn't work otherwise in some combinations.
223 command.append('sys.stdout.flush()')
224 command.extend((
225 'time.sleep(%s)' % duration,
226 'print(\'B\')',
227 ))
228 return run_test_cases.Popen(
229 [
230 sys.executable,
231 '-c',
232 ';'.join(command),
233 ],
234 stdout=subprocess.PIPE,
235 universal_newlines=True,
236 env=env)
237
238 def test_yield_any_None(self):
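# Retries with increasingly long sleeps so that a loaded machine does not
# turn timing flakiness into a hard failure; only the last attempt raises.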
239 for duration in (0.05, 0.1, 0.5, 2):
240 try:
241 proc = self._get_output_sleep_proc(True, {}, duration)
242 expected = [
243 'A\n',
244 'B\n',
245 ]
246 for p, data in proc.yield_any(timeout=None):
247 self.assertEqual('stdout', p)
248 self.assertEqual(expected.pop(0), data)
249 self.assertEqual(0, proc.returncode)
250 self.assertEqual([], expected)
251 break
252 except AssertionError:
253 if duration != 2:
254 print('Sleeping rocks. trying more slowly.')
255 continue
256 raise
257
258 def test_yield_any_0(self):
259 for duration in (0.05, 0.1, 0.5, 2):
260 try:
261 proc = self._get_output_sleep_proc(True, {}, duration)
262 expected = [
263 'A\n',
264 'B\n',
265 ]
266 got_none = False
267 for p, data in proc.yield_any(timeout=0):
268 if not p:
269 got_none = True
270 continue
271 self.assertEqual('stdout', p)
272 self.assertEqual(expected.pop(0), data)
273 self.assertEqual(0, proc.returncode)
274 self.assertEqual([], expected)
275 self.assertEqual(True, got_none)
276 break
277 except AssertionError:
278 if duration != 2:
279 print('Sleeping rocks. trying more slowly.')
280 continue
281 raise
282
283 def test_recv_any_None(self):
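# Each entry is (flush, expected chunks, extra environment); without an
# explicit flush the two prints coalesce unless PYTHONUNBUFFERED is set.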
284 values = (
285 (True, ['A\n', 'B\n'], {}),
286 (False, ['A\nB\n'], {}),
287 (False, ['A\n', 'B\n'], {'PYTHONUNBUFFERED': 'x'}),
288 )
289 for flush, exp, env in values:
290 for duration in (0.05, 0.1, 0.5, 2):
291 expected = exp[:]
292 try:
293 proc = self._get_output_sleep_proc(flush, env, duration)
294 while True:
295 p, data = proc.recv_any(timeout=None)
296 if not p:
297 break
298 self.assertEqual('stdout', p)
299 if not expected:
300 self.fail(data)
301 e = expected.pop(0)
302 if env:
303 # Buffering truly happens at the character level, so items can
304 # arrive individually. This is usually seen only under high load;
305 # try compiling at the same time to reproduce it.
306 if len(data) < len(e):
307 expected.insert(0, e[len(data):])
308 e = e[:len(data)]
309 self.assertEqual(e, data)
310 # Contrary to yield_any() or recv_any(0), wait() needs to be used
311 # here.
312 proc.wait()
313 self.assertEqual([], expected)
314 self.assertEqual(0, proc.returncode)
315 except AssertionError:
316 if duration != 2:
317 print('Sleeping rocks. trying more slowly.')
318 continue
319 raise
320
321 def test_recv_any_0(self):
322 values = (
323 (True, ['A\n', 'B\n'], {}),
324 (False, ['A\nB\n'], {}),
325 (False, ['A\n', 'B\n'], {'PYTHONUNBUFFERED': 'x'}),
326 )
327 for i, (flush, exp, env) in enumerate(values):
328 for duration in (0.1, 0.5, 2):
329 expected = exp[:]
330 try:
331 proc = self._get_output_sleep_proc(flush, env, duration)
332 got_none = False
333 while True:
334 p, data = proc.recv_any(timeout=0)
335 if not p:
336 if proc.poll() is None:
337 got_none = True
338 continue
339 break
340 self.assertEqual('stdout', p)
341 if not expected:
342 self.fail(data)
343 e = expected.pop(0)
344 if sys.platform == 'win32':
345 # Buffering truly happens at the character level on Windows, so
346 # items can arrive individually.
347 if len(data) < len(e):
348 expected.insert(0, e[len(data):])
349 e = e[:len(data)]
350 self.assertEqual(e, data)
351
352 self.assertEqual(0, proc.returncode)
353 self.assertEqual([], expected)
354 self.assertEqual(True, got_none)
355 except Exception as e:
356 if duration != 2:
357 print('Sleeping rocks. trying more slowly.')
358 continue
359 print >> sys.stderr, 'Failure at index %d' % i
360 raise
361
362 def test_gtest_filter(self):
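# Temporarily replaces run_test_cases.run_test_cases with a stub that
# checks the arguments main() forwards when --gtest_filter is used.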
363 old = run_test_cases.run_test_cases
364 exe = os.path.join(ROOT_DIR, 'tests', 'gtest_fake', 'gtest_fake_pass.py')
365 def expect(
366 executable, cwd, test_cases, jobs, timeout, clusters, retries,
367 run_all, max_failures, no_cr, gtest_output, result_file, verbose):
368 self.assertEqual(run_test_cases.fix_python_path([exe]), executable)
369 self.assertEqual(os.getcwd(), cwd)
370 # They are in reverse order due to test shuffling.
371 self.assertEqual(['Foo.Bar1', 'Foo.Bar/3'], test_cases)
372 self.assertEqual(run_test_cases.run_isolated.num_processors(), jobs)
373 self.assertEqual(75, timeout)
374 self.assertEqual(None, clusters)
375 self.assertEqual(2, retries)
376 self.assertEqual(None, run_all)
377 self.assertEqual(None, no_cr)
378 self.assertEqual('', gtest_output)
379 self.assertEqual(None, max_failures)
380 self.assertEqual(exe + '.run_test_cases', result_file)
381 self.assertFalse(verbose)
382 return 89
383
384 try:
385 run_test_cases.run_test_cases = expect
386 result = run_test_cases.main([exe, '--gtest_filter=Foo.Bar*-*.Bar2'])
387 self.assertEqual(89, result)
388 finally:
389 run_test_cases.run_test_cases = old
390
391 def test_ResetableTimeout(self):
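# The loop spins, calling reset() each iteration, until float(a) grows past
# the initial 1s value, then checks the growth stayed well under 10s.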
392 self.assertTrue(run_test_cases.ResetableTimeout(0))
393 a = run_test_cases.ResetableTimeout(1)
394 self.assertEqual(1., float(a))
395 count = 0
396 for count in xrange(1000000):
397 value = float(a)
398 self.assertTrue(value >= 1., value)
399 if value != 1.:
400 break
401 a.reset()
402 self.assertTrue(value > 1., value)
403 # Assume no 10s jank.
404 self.assertTrue(value < 10., value)
405 self.assertTrue(count < 1000000, count)
406 self.assertTrue(count > 0, count)
407
408
409 class RunTestCasesFast(unittest.TestCase):
410 def test_convert_to_lines(self):
411 data = [
412 (
413 ('blah'),
414 ['blah'],
415 ),
416 (
417 ('blah\n'),
418 ['blah\n'],
419 ),
420 (
421 ('blah', '\n'),
422 ['blah\n'],
423 ),
424 (
425 ('\n'),
426 ['\n'],
427 ),
428 (
429 ('blah blah\nboo'),
430 ['blah blah\n', 'boo'],
431 ),
432 (
433 ('b', 'lah blah\nboo'),
434 ['blah blah\n', 'boo'],
435 ),
436 ]
437 for generator, expected in data:
438 self.assertEqual(
439 expected,
440 list(run_test_cases.convert_to_lines(generator)))
441
442 def testRunSome(self):
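# Each entry pairs a RunSome decider with the failure sequence it must
# absorb before should_stop() flips to True.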
443 tests = [
444 # Try with named arguments. Accepts 3*1 failures.
445 (
446 run_test_cases.RunSome(
447 expected_count=10,
448 retries=2,
449 min_failures=1,
450 max_failure_ratio=0.001,
451 max_failures=None),
452 [False] * 4),
453 # Same without named arguments.
454 (run_test_cases.RunSome( 10, 2, 1, 0.001, None), [False] * 4),
455
456 (run_test_cases.RunSome( 10, 0, 1, 0.001, None), [False] * 2),
457 (run_test_cases.RunSome( 10, 0, 1, 0.010, None), [False] * 2),
458
459 # For low expected_count values, retries * min_failures is the minimum
460 # bound of accepted failures.
461 (run_test_cases.RunSome( 10, 2, 1, 0.010, None), [False] * 4),
462 (run_test_cases.RunSome( 10, 2, 1, 0.020, None), [False] * 4),
463 (run_test_cases.RunSome( 10, 2, 1, 0.050, None), [False] * 4),
464 (run_test_cases.RunSome( 10, 2, 1, 0.100, None), [False] * 4),
465 (run_test_cases.RunSome( 10, 2, 1, 0.110, None), [False] * 4),
466
467 # Allows expected_count + retries failures at maximum.
468 (run_test_cases.RunSome( 10, 2, 1, 0.200, None), [False] * 6),
469 (run_test_cases.RunSome( 10, 2, 1, 0.999, None), [False] * 30),
470
471 # The asymptote approaches max_failure_ratio for large expected_count
472 # values.
473 (run_test_cases.RunSome(1000, 2, 1, 0.050, None), [False] * 150),
474 ]
475 for index, (decider, rounds) in enumerate(tests):
476 for index2, r in enumerate(rounds):
477 self.assertFalse(decider.should_stop(), (index, index2, str(decider)))
478 decider.got_result(r)
479 self.assertTrue(decider.should_stop(), (index, str(decider)))
480
481 def testStatsInfinite(self):
482 decider = run_test_cases.RunAll()
483 for _ in xrange(200):
484 self.assertFalse(decider.should_stop())
485 decider.got_result(False)
486
487 def test_process_output_garbage(self):
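# Output that never mentions the requested test cases yields one entry per
# test case with None duration, output and returncode.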
488 data = 'garbage\n'
489 expected = [
490 {
491 'duration': None,
492 'output': None,
493 'returncode': None,
494 'test_case': 'Test.1',
495 },
496 {
497 'duration': None,
498 'output': None,
499 'returncode': None,
500 'test_case': 'Test.2',
501 },
502 ]
503 actual = process_output(data, ['Test.1', 'Test.2'])
504 self.assertEqual(expected, actual)
505
506 def test_process_output_crash_cr(self):
507 # CR-only line endings are supported. Assume a crash.
508 data = '[ RUN ] Test.1\r[ RUN ] Test.2\r'
509 expected = [
510 {
511 'crashed': True,
512 'duration': 0,
513 'output': '[ RUN ] Test.1\r',
514 'returncode': 1,
515 'test_case': 'Test.1',
516 },
517 {
518 'crashed': True,
519 'duration': 0.,
520 'output': '[ RUN ] Test.2\r',
521 'returncode': 1,
522 'test_case': 'Test.2',
523 },
524 ]
525 actual = process_output(data, ['Test.1', 'Test.2'])
526 self.assertEqual(expected, actual)
527
528 def test_process_output_crashes(self):
529 data = '[ RUN ] Test.1\n[ RUN ] Test.2\n'
530 expected = [
531 {
532 'crashed': True,
533 'duration': 0,
534 'output': '[ RUN ] Test.1\n',
535 'returncode': 1,
536 'test_case': 'Test.1',
537 },
538 {
539 'crashed': True,
540 'duration': 0.,
541 'output': '[ RUN ] Test.2\n',
542 'returncode': 1,
543 'test_case': 'Test.2',
544 },
545 ]
546 actual = process_output(data, ['Test.1', 'Test.2'])
547 self.assertEqual(expected, actual)
548
549 def test_process_output_ok(self):
550 data = (
551 '[ RUN ] Test.1\n'
552 '[ OK ] Test.1 (1000 ms)\n'
553 '[ RUN ] Test.2\n'
554 '[ OK ] Test.2 (2000 ms)\n')
555 expected = [
556 {
557 'duration': 1.,
558 'output': '[ RUN ] Test.1\n[ OK ] Test.1 (1000 ms)\n',
559 'returncode': 0,
560 'test_case': 'Test.1',
561 },
562 {
563 'duration': 2.,
564 'output': '[ RUN ] Test.2\n[ OK ] Test.2 (2000 ms)\n',
565 'returncode': 0,
566 'test_case': 'Test.2',
567 },
568 ]
569 actual = process_output(data, ['Test.1', 'Test.2'])
570 self.assertEqual(expected, actual)
571
572 def test_process_output_no_time(self):
573 data = (
574 '[ RUN ] Test.1\n'
575 '[ OK ] Test.1\n')
576 expected = [
577 {
578 'duration': 0.,
579 'output': '[ RUN ] Test.1\n[ OK ] Test.1\n',
580 'returncode': 0,
581 'test_case': 'Test.1',
582 },
583 ]
584 actual = process_output(data, ['Test.1'])
585 self.assertEqual(expected, actual)
586
587 def test_process_output_fail_1(self):
588 data = (
589 '[ RUN ] Test.1\n'
590 '[ FAILED ] Test.1 (1000 ms)\n'
591 '[ RUN ] Test.2\n'
592 '[ OK ] Test.2 (2000 ms)\n')
593 expected = [
594 {
595 'duration': 1.,
596 'output': '[ RUN ] Test.1\n[ FAILED ] Test.1 (1000 ms)\n',
597 'returncode': 1,
598 'test_case': 'Test.1',
599 },
600 {
601 'duration': 2.,
602 'output': '[ RUN ] Test.2\n[ OK ] Test.2 (2000 ms)\n',
603 'returncode': 0,
604 'test_case': 'Test.2',
605 },
606 ]
607 actual = process_output(data, ['Test.1', 'Test.2'])
608 self.assertEqual(expected, actual)
609
610 def test_process_output_crash_ok(self):
611 data = (
612 '[ RUN ] Test.1\n'
613 'blah blah crash.\n'
614 '[ RUN ] Test.2\n'
615 '[ OK ] Test.2 (2000 ms)\n')
616 expected = [
617 {
618 'crashed': True,
619 'duration': 0.,
620 'output': '[ RUN ] Test.1\nblah blah crash.\n',
621 'returncode': 1,
622 'test_case': 'Test.1',
623 },
624 {
625 'duration': 2.,
626 'output': '[ RUN ] Test.2\n[ OK ] Test.2 (2000 ms)\n',
627 'returncode': 0,
628 'test_case': 'Test.2',
629 },
630 ]
631 actual = process_output(data, ['Test.1', 'Test.2'])
632 self.assertEqual(expected, actual)
633
634 def test_process_output_crash_garbage_ok(self):
635 data = (
636 '[ RUN ] Test.1\n'
637 'blah blah crash[ RUN ] Test.2\n'
638 '[ OK ] Test.2 (2000 ms)\n')
639 expected = [
640 {
641 'crashed': True,
642 'duration': 0.,
643 'output': '[ RUN ] Test.1\nblah blah crash',
644 'returncode': 1,
645 'test_case': 'Test.1',
646 },
647 {
648 'duration': 2.,
649 'output': '[ RUN ] Test.2\n[ OK ] Test.2 (2000 ms)\n',
650 'returncode': 0,
651 'test_case': 'Test.2',
652 },
653 ]
654 actual = process_output(data, ['Test.1', 'Test.2'])
655 self.assertEqual(expected, actual)
656
657 def test_process_output_missing(self):
658 data = (
659 '[ RUN ] Test.2\n'
660 '[ OK ] Test.2 (2000 ms)\n')
661 expected = [
662 {
663 'duration': 2.,
664 'output': '[ RUN ] Test.2\n[ OK ] Test.2 (2000 ms)\n',
665 'returncode': 0,
666 'test_case': 'Test.2',
667 },
668 {
669 'duration': None,
670 'output': None,
671 'returncode': None,
672 'test_case': 'Test.1',
673 },
674 ]
675 actual = process_output(data, ['Test.1', 'Test.2'])
676 self.assertEqual(expected, actual)
677
678 def test_process_output_no_lr(self):
679 data = (
680 '[ RUN ] Test.1\n'
681 'junkjunk[ OK ] Test.1 (2000 ms)\n')
682 expected = [
683 {
684 'duration': 2.,
685 'output':
686 '[ RUN ] Test.1\njunkjunk[ OK ] Test.1 (2000 ms)\n',
687 'returncode': 0,
688 'test_case': 'Test.1',
689 },
690 ]
691 actual = process_output(data, ['Test.1'])
692 self.assertEqual(expected, actual)
693
694 def test_process_output_fake_ok(self):
695 data = (
696 '[ RUN ] TestFix.TestCase\n'
697 '[1:2/3:WARNING:extension_apitest.cc(169)] Workaround for 177163, '
698 'prematurely stopping test\n'
699 '[ OK ] X (1000ms total)\n'
700 '[0523/230139:ERROR:test_launcher.cc(365)] Test timeout (45000 ms) '
701 'exceeded for ExtensionManagementApiTest.ManagementPolicyProhibited\n')
702 expected = [
703 {
704 'crashed': True,
705 'duration': 0,
706 'output': data,
707 'returncode': 1,
708 'test_case': 'TestFix.TestCase',
709 },
710 ]
711 actual = process_output(data, ['TestFix.TestCase'])
712 self.assertEqual(expected, actual)
713
714 def test_calc_cluster_default(self):
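# Each entry maps (num_test_cases, jobs) to the cluster size
# calc_cluster_default() is expected to return.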
715 expected = [
716 ((0, 1), 0),
717 ((1, 1), 1),
718 ((1, 10), 1),
719 ((10, 10), 1),
720 ((10, 100), 1),
721
722 # Most VMs have 4 or 8 CPUs; assert that the values are sane.
723 ((5, 1), 5),
724 ((5, 2), 2),
725 ((5, 4), 1),
726 ((5, 8), 1),
727 ((10, 1), 2),
728 ((10, 4), 2),
729 ((10, 8), 1),
730 ((100, 1), 10),
731 ((100, 4), 5),
732 ((100, 8), 3),
733 ((1000, 1), 10),
734 ((1000, 4), 10),
735 ((1000, 8), 10),
736 ((3000, 1), 10),
737 ((3000, 4), 10),
738 ((3000, 8), 10),
739 ]
740 actual = [
741 ((num_test_cases, jobs),
742 run_test_cases.calc_cluster_default(num_test_cases, jobs))
743 for (num_test_cases, jobs), _ in expected
744 ]
745 self.assertEqual(expected, actual)
746
747
748 class RunTestCasesTmp(unittest.TestCase):
749 def setUp(self):
750 super(RunTestCasesTmp, self).setUp()
751 self.tmpdir = tempfile.mkdtemp(prefix='run_test_cases')
752
753 def tearDown(self):
754 shutil.rmtree(self.tmpdir)
755 super(RunTestCasesTmp, self).tearDown()
756
757 def test_xml(self):
758 # Test that when files are already present, the generated index
759 # increases accordingly.
760 open(os.path.join(self.tmpdir, 'a.xml'), 'w').close()
761 open(os.path.join(self.tmpdir, 'a_0.xml'), 'w').close()
762 open(os.path.join(self.tmpdir, 'a_1.xml'), 'w').close()
763 self.assertEqual(
764 os.path.join(self.tmpdir, 'a_2.xml'),
765 run_test_cases.gen_gtest_output_dir(self.tmpdir, 'xml:a.xml'))
766
767 def test_xml_default(self):
768 self.assertEqual(
769 os.path.join(self.tmpdir, 'test_detail.xml'),
770 run_test_cases.gen_gtest_output_dir(self.tmpdir, 'xml'))
771
772 def test_gen_xml(self):
773 data = {
774 "duration": 7.895771026611328,
775 "expected": 500,
776 "fail": [
777 "SecurityTest.MemoryAllocationRestrictionsCalloc",
778 "SecurityTest.MemoryAllocationRestrictionsNewArray"
779 ],
780 "flaky": [
781 "AlignedMemoryTest.DynamicAllocation",
782 "AlignedMemoryTest.ScopedDynamicAllocation",
783 ],
784 "missing": [
785 "AlignedMemoryTest.DynamicAllocation",
786 "AlignedMemoryTest.ScopedDynamicAllocation",
787 ],
788 "success": [
789 "AtExitTest.Param",
790 "AtExitTest.Task",
791 ],
792 "test_cases": {
793 "AlignedMemoryTest.DynamicAllocation": [
794 {
795 "duration": 0.044817209243774414,
796 "output": "blah blah",
797 "returncode": 1,
798 "test_case": "AlignedMemoryTest.DynamicAllocation"
799 }
800 ],
801 "AlignedMemoryTest.ScopedDynamicAllocation": [
802 {
803 "duration": 0.03273797035217285,
804 "output": "blah blah",
805 "returncode": 0,
806 "test_case": "AlignedMemoryTest.ScopedDynamicAllocation"
807 }
808 ],
809 "Foo.Bar": [
810 {
811 "duration": None,
812 "output": None,
813 "returncode": None,
814 "test_case": "Foo.Bar",
815 },
816 ],
817 },
818 }
819 expected = (
820 '<?xml version="1.0" ?>\n'
821 '<testsuites name="AllTests" tests="500" time="7.895771" '
822 'timestamp="1996">\n'
823 '<testsuite name="AlignedMemoryTest" tests="2">\n'
824 ' <testcase classname="AlignedMemoryTest" name="DynamicAllocation" '
825 'status="run" time="0.044817">\n'
826 '<failure><![CDATA[blah blah]]></failure></testcase>\n'
827 ' <testcase classname="AlignedMemoryTest" '
828 'name="ScopedDynamicAllocation" status="run" time="0.032738"/>\n'
829 '</testsuite>\n'
830 '<testsuite name="Foo" tests="1">\n'
831 ' <testcase classname="Foo" name="Bar" '
832 'status="run" time="0.000000">\n'
833 '<failure><![CDATA[]]></failure></testcase>\n'
834 '</testsuite>\n'
835 '</testsuites>')
836 filepath = os.path.join(self.tmpdir, 'foo.xml')
837 run_test_cases.dump_results_as_xml(filepath, data, '1996')
838 with open(filepath, 'rb') as f:
839 actual = f.read()
840 self.assertEqual(expected.splitlines(), actual.splitlines())
841
842
843 class FakeProgress(object):
844 @staticmethod
845 def print_update():
846 pass
847
848
849 class WorkerPoolTest(unittest.TestCase):
850 def test_normal(self):
851 mapper = lambda value: -value
852 progress = FakeProgress()
853 with run_test_cases.ThreadPool(progress, 8, 8, 0) as pool:
854 for i in range(32):
855 pool.add_task(0, mapper, i)
856 results = pool.join()
857 self.assertEqual(range(-31, 1), sorted(results))
858
859 def test_exception(self):
860 class FearsomeException(Exception):
861 pass
862 def mapper(value):
863 raise FearsomeException(value)
864 task_added = False
865 try:
866 progress = FakeProgress()
867 with run_test_cases.ThreadPool(progress, 8, 8, 0) as pool:
868 pool.add_task(0, mapper, 0)
869 task_added = True
870 pool.join()
871 self.fail()
872 except FearsomeException:
873 self.assertEqual(True, task_added)
874
875
876 if __name__ == '__main__':
877 VERBOSE = '-v' in sys.argv
878 logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
879 unittest.TestCase.maxDiff = 5000
880 unittest.main()