Chromium Code Reviews

Side by Side Diff: mojo/tools/pylib/mojo_python_tests_runner.py

Issue 509863002: Remove python bindings unittests from the build. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 3 months ago
The new side of the diff is empty: this patch deletes the file. Its old contents follow.
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import argparse
import json
import os
import sys
import time
import unittest


class MojoPythonTestRunner(object):
  """Helper class to run python tests on the bots."""

  def __init__(self, test_dir):
    self._test_dir = test_dir

  def run(self):
    parser = argparse.ArgumentParser()
    parser.usage = 'run_mojo_python_tests.py [options] [tests...]'
    parser.add_argument('-v', '--verbose', action='count', default=0)
    parser.add_argument('--metadata', action='append', default=[],
                        help=('optional key=value metadata that will be stored '
                              'in the results files (can be used for revision '
                              'numbers, etc.)'))
    parser.add_argument('--write-full-results-to', metavar='FILENAME',
                        action='store',
                        help='path to write the list of full results to.')
    parser.add_argument('tests', nargs='*')

    self.add_custom_commandline_options(parser)
    args = parser.parse_args()
    self.apply_customization(args)

    bad_metadata = False
    for val in args.metadata:
      if '=' not in val:
        print >> sys.stderr, ('Error: malformed metadata "%s"' % val)
        bad_metadata = True
    if bad_metadata:
      print >> sys.stderr
      parser.print_help()
      return 2

    chromium_src_dir = os.path.join(os.path.dirname(__file__),
                                    os.pardir,
                                    os.pardir,
                                    os.pardir)

    loader = unittest.loader.TestLoader()
    print "Running Python unit tests under %s..." % self._test_dir

    pylib_dir = os.path.join(chromium_src_dir, self._test_dir)
    if args.tests:
      if pylib_dir not in sys.path:
        sys.path.append(pylib_dir)
      suite = unittest.TestSuite()
      for test_name in args.tests:
        suite.addTests(loader.loadTestsFromName(test_name))
    else:
      suite = loader.discover(pylib_dir, pattern='*_unittest.py')

    runner = unittest.runner.TextTestRunner(verbosity=(args.verbose + 1))
    result = runner.run(suite)

    full_results = _FullResults(suite, result, args.metadata)
    if args.write_full_results_to:
      with open(args.write_full_results_to, 'w') as fp:
        json.dump(full_results, fp, indent=2)
        fp.write("\n")

    return 0 if result.wasSuccessful() else 1

  def add_custom_commandline_options(self, parser):
    """Allows subclasses to add custom options to the runner script."""
    pass

  def apply_customization(self, args):
    """Allows subclasses to apply customizations to the parsed options."""
    pass


TEST_SEPARATOR = '.'


def _FullResults(suite, result, metadata):
  """Convert the unittest results to the Chromium JSON test result format.

  This matches run-webkit-tests (the layout tests) and the flakiness dashboard.
  """

  full_results = {}
  full_results['interrupted'] = False
  full_results['path_delimiter'] = TEST_SEPARATOR
  full_results['version'] = 3
  full_results['seconds_since_epoch'] = time.time()
  for md in metadata:
    key, val = md.split('=', 1)
    full_results[key] = val

  all_test_names = _AllTestNames(suite)
  failed_test_names = _FailedTestNames(result)

  full_results['num_failures_by_type'] = {
      'FAIL': len(failed_test_names),
      'PASS': len(all_test_names) - len(failed_test_names),
  }

  full_results['tests'] = {}

  for test_name in all_test_names:
    value = {}
    value['expected'] = 'PASS'
    if test_name in failed_test_names:
      value['actual'] = 'FAIL'
      value['is_unexpected'] = True
    else:
      value['actual'] = 'PASS'
    _AddPathToTrie(full_results['tests'], test_name, value)

  return full_results

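# Editor's note (illustrative only, not part of the original file): with two
# tests foo.FooTest.test_a (passing) and foo.FooTest.test_b (failing), plus
# --metadata revision=1234, _FullResults returns a dict that serializes to
# JSON roughly as:
#
#   {
#     "interrupted": false,
#     "path_delimiter": ".",
#     "version": 3,
#     "seconds_since_epoch": 1409875200.0,
#     "revision": "1234",
#     "num_failures_by_type": {"FAIL": 1, "PASS": 1},
#     "tests": {
#       "foo": {
#         "FooTest": {
#           "test_a": {"expected": "PASS", "actual": "PASS"},
#           "test_b": {"expected": "PASS", "actual": "FAIL",
#                      "is_unexpected": true}
#         }
#       }
#     }
#   }
#
# The nested "tests" dict is the trie built by _AddPathToTrie below, keyed on
# the '.'-separated components of each test id.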

def _AllTestNames(suite):
  test_names = []
  # _tests is protected  pylint: disable=W0212
  for test in suite._tests:
    if isinstance(test, unittest.suite.TestSuite):
      test_names.extend(_AllTestNames(test))
    else:
      test_names.append(test.id())
  return test_names


def _FailedTestNames(result):
  return set(test.id() for test, _ in result.failures + result.errors)


def _AddPathToTrie(trie, path, value):
  if TEST_SEPARATOR not in path:
    trie[path] = value
    return
  directory, rest = path.split(TEST_SEPARATOR, 1)
  if directory not in trie:
    trie[directory] = {}
  _AddPathToTrie(trie[directory], rest, value)
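
For context on how this class was consumed before its removal: a wrapper script subclasses MojoPythonTestRunner and overrides the two hooks to add suite-specific behavior. The sketch below is illustrative only; the ExampleTestRunner name, the --build-dir flag, and the import path are hypothetical and not taken from this patch.

#!/usr/bin/env python
# Hypothetical wrapper script; a minimal sketch, not part of this patch.
import sys

from mojo_python_tests_runner import MojoPythonTestRunner


class ExampleTestRunner(MojoPythonTestRunner):
  def add_custom_commandline_options(self, parser):
    # Hook: extend the argparse parser with suite-specific flags.
    parser.add_argument('--build-dir', action='store',
                        help='path to the build output directory.')

  def apply_customization(self, args):
    # Hook: act on the parsed flags before the tests are loaded.
    if args.build_dir and args.build_dir not in sys.path:
      sys.path.append(args.build_dir)


if __name__ == '__main__':
  # The runner discovers *_unittest.py files under the given directory
  # (relative to the Chromium src root) unless explicit test names are passed
  # on the command line; it exits 0 on success, 1 on test failure, and 2 on
  # malformed --metadata.
  sys.exit(ExampleTestRunner('mojo/python/tests').run())

A typical invocation would then be `python example_test_runner.py --metadata revision=1234 --write-full-results-to results.json`, producing a JSON document of the shape sketched after _FullResults above.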