OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2014 The Chromium Authors. All rights reserved. | 2 # Copyright 2014 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 import optparse | 6 import argparse |
| 7 import json |
7 import os | 8 import os |
8 import re | 9 import re |
9 import sys | 10 import sys |
| 11 import time |
10 import unittest | 12 import unittest |
11 | 13 |
12 | 14 |
13 def main(): | 15 def main(): |
14 parser = optparse.OptionParser() | 16 parser = argparse.ArgumentParser() |
15 parser.usage = 'run_mojo_python_tests.py [options] [tests...]' | 17 parser.usage = 'run_mojo_python_tests.py [options] [tests...]' |
16 parser.add_option('-v', '--verbose', action='count', default=0) | 18 parser.add_argument('-v', '--verbose', action='count', default=0) |
17 parser.add_option('--unexpected-failures', metavar='FILENAME', action='store', | 19 parser.add_argument('--metadata', action='append', default=[], |
18 help=('path to write a list of any tests that fail ' | 20 help=('optional key=value metadata that will be stored ' |
19 'unexpectedly.')) | 21 'in the results files (can be used for revision ' |
20 parser.epilog = ('If --unexpected-failures is passed, a list of the tests ' | 22 'numbers, etc.)')) |
21 'that failed (one per line) will be written to the file. ' | 23 parser.add_argument('--write-full-results-to', metavar='FILENAME', |
22 'If no tests failed, the file will be truncated (empty). ' | 24 action='store', |
23 'If the test run did not complete properly, or something ' | 25 help='path to write the list of full results to.') |
24 'else weird happened, any existing file will be left ' | 26 parser.add_argument('tests', nargs='*') |
25 'unmodified. ' | 27 |
26 'If --unexpected-failures is *not* passed, any existing ' | 28 args = parser.parse_args() |
27 'file will be ignored and left unmodified.') | 29 |
28 options, args = parser.parse_args() | 30 bad_metadata = False |
| 31 for val in args.metadata: |
| 32 if '=' not in val: |
| 33 print >> sys.stderr, ('Error: malformed metadata "%s"' % val) |
| 34 bad_metadata = True |
| 35 if bad_metadata: |
| 36 print >> sys.stderr |
| 37 parser.print_help() |
| 38 return 2 |
29 | 39 |
30 chromium_src_dir = os.path.join(os.path.dirname(__file__), | 40 chromium_src_dir = os.path.join(os.path.dirname(__file__), |
31 os.pardir, | 41 os.pardir, |
32 os.pardir) | 42 os.pardir) |
33 | 43 |
34 loader = unittest.loader.TestLoader() | 44 loader = unittest.loader.TestLoader() |
35 print "Running Python unit tests under mojo/public/tools/bindings/pylib ..." | 45 print "Running Python unit tests under mojo/public/tools/bindings/pylib ..." |
36 | 46 |
37 pylib_dir = os.path.join(chromium_src_dir, 'mojo', 'public', | 47 pylib_dir = os.path.join(chromium_src_dir, 'mojo', 'public', |
38 'tools', 'bindings', 'pylib') | 48 'tools', 'bindings', 'pylib') |
39 if args: | 49 if args.tests: |
40 if not pylib_dir in sys.path: | 50 if pylib_dir not in sys.path: |
41 sys.path.append(pylib_dir) | 51 sys.path.append(pylib_dir) |
42 suite = unittest.TestSuite() | 52 suite = unittest.TestSuite() |
43 for test_name in args: | 53 for test_name in args.tests: |
44 suite.addTests(loader.loadTestsFromName(test_name)) | 54 suite.addTests(loader.loadTestsFromName(test_name)) |
45 else: | 55 else: |
46 suite = loader.discover(pylib_dir, pattern='*_unittest.py') | 56 suite = loader.discover(pylib_dir, pattern='*_unittest.py') |
47 | 57 |
48 runner = unittest.runner.TextTestRunner(verbosity=(options.verbose + 1)) | 58 runner = unittest.runner.TextTestRunner(verbosity=(args.verbose + 1)) |
49 result = runner.run(suite) | 59 result = runner.run(suite) |
50 | 60 |
51 if options.unexpected_failures: | 61 full_results = _FullResults(suite, result, args.metadata) |
52 WriteUnexpectedFailures(result, options.unexpected_failures) | 62 if args.write_full_results_to: |
| 63 with open(args.write_full_results_to, 'w') as fp: |
| 64 json.dump(full_results, fp, indent=2) |
| 65 fp.write("\n") |
53 | 66 |
54 return 0 if result.wasSuccessful() else 1 | 67 return 0 if result.wasSuccessful() else 1 |
55 | 68 |
56 | 69 |
57 def WriteUnexpectedFailures(result, path): | 70 TEST_SEPARATOR = '.' |
58 | 71 |
| 72 |
| 73 def _FullResults(suite, result, metadata): |
| 74 """Convert the unittest results to the Chromium JSON test result format. |
| 75 |
| 76 This matches run-webkit-tests (the layout tests) and the flakiness dashboard. |
| 77 """ |
| 78 |
| 79 full_results = {} |
| 80 full_results['interrupted'] = False |
| 81 full_results['path_delimiter'] = TEST_SEPARATOR |
| 82 full_results['version'] = 3 |
| 83 full_results['seconds_since_epoch'] = time.time() |
| 84 for md in metadata: |
| 85 key, val = md.split('=', 1) |
| 86 full_results[key] = val |
| 87 |
| 88 all_test_names = _AllTestNames(suite) |
| 89 failed_test_names = _FailedTestNames(result) |
| 90 |
| 91 full_results['num_failures_by_type'] = { |
| 92 'Failure': len(failed_test_names), |
| 93 'Pass': len(all_test_names) - len(failed_test_names), |
| 94 } |
| 95 |
| 96 full_results['tests'] = {} |
| 97 |
| 98 for test_name in all_test_names: |
| 99 value = { |
| 100 'expected': 'PASS', |
| 101 'actual': 'FAIL' if (test_name in failed_test_names) else 'FAIL', |
| 102 } |
| 103 _AddPathToTrie(full_results['tests'], test_name, value) |
| 104 |
| 105 return full_results |
| 106 |
| 107 |
| 108 def _AllTestNames(suite): |
| 109 test_names = [] |
| 110 # _tests is protected pylint: disable=W0212 |
| 111 for test in suite._tests: |
| 112 if isinstance(test, unittest.suite.TestSuite): |
| 113 test_names.extend(_AllTestNames(test)) |
| 114 else: |
| 115 test_names.append(_UnitTestName(test)) |
| 116 return test_names |
| 117 |
| 118 |
| 119 def _FailedTestNames(result): |
| 120 failed_test_names = set() |
| 121 for (test, _) in result.failures + result.errors: |
| 122 failed_test_names.add(_UnitTestName(test)) |
| 123 return failed_test_names |
| 124 |
| 125 |
| 126 def _AddPathToTrie(trie, path, value): |
| 127 if TEST_SEPARATOR not in path: |
| 128 trie[path] = value |
| 129 return |
| 130 directory, rest = path.split(TEST_SEPARATOR, 1) |
| 131 if directory not in trie: |
| 132 trie[directory] = {} |
| 133 _AddPathToTrie(trie[directory], rest, value) |
| 134 |
| 135 |
| 136 _UNITTEST_NAME_REGEX = re.compile(r"(\w+) \(([\w.]+)\)") |
| 137 |
| 138 |
| 139 def _UnitTestName(test): |
59 # This regex and UnitTestName() extract the test_name in a way | 140 # This regex and _UnitTestName() extract the test_name in a way |
60 # that can be handed back to the loader successfully. | 141 # that can be handed back to the loader successfully. |
61 | 142 m = _UNITTEST_NAME_REGEX.match(str(test)) |
62 test_description = re.compile("(\w+) \(([\w.]+)\)") | 143 assert m, "could not find test name from test description %s" % str(test) |
63 | 144 return "%s.%s" % (m.group(2), m.group(1)) |
64 def UnitTestName(test): | |
65 m = test_description.match(str(test)) | |
66 return "%s.%s" % (m.group(2), m.group(1)) | |
67 | |
68 with open(path, 'w') as fp: | |
69 for (test, _) in result.failures + result.errors: | |
70 fp.write(UnitTestName(test) + '\n') | |
71 | 145 |
72 | 146 |
73 if __name__ == '__main__': | 147 if __name__ == '__main__': |
74 sys.exit(main()) | 148 sys.exit(main()) |
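For reference, here is a minimal sketch of what the file written via --write-full-results-to could contain. It is not taken from the change itself; the test names (module_unittest.FooTest.test_passes / test_fails), the revision value, and the timestamp are made up for illustration, assuming one passing and one failing test and an invocation like: run_mojo_python_tests.py --metadata revision=12345 --write-full-results-to results.json. Any --metadata key=value pairs become extra top-level keys, and the "tests" trie is built by _AddPathToTrie using '.' as the path delimiter.

{
  "interrupted": false,
  "path_delimiter": ".",
  "version": 3,
  "seconds_since_epoch": 1401234567.0,
  "revision": "12345",
  "num_failures_by_type": {
    "Failure": 1,
    "Pass": 1
  },
  "tests": {
    "module_unittest": {
      "FooTest": {
        "test_passes": {
          "expected": "PASS",
          "actual": "PASS"
        },
        "test_fails": {
          "expected": "PASS",
          "actual": "FAIL"
        }
      }
    }
  }
}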