OLD | NEW |
1 # Copyright 2016 The Chromium Authors. All rights reserved. | 1 # Copyright 2016 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import argparse | 5 import argparse |
6 import inspect | 6 import inspect |
7 import json | 7 import json |
8 import re | 8 import re |
| 9 import time |
9 import unittest | 10 import unittest |
10 | 11 |
11 from telemetry.core import discover | 12 from telemetry.core import discover |
12 from telemetry.internal.browser import browser_options | 13 from telemetry.internal.browser import browser_options |
13 from telemetry.internal.util import binary_manager | 14 from telemetry.internal.util import binary_manager |
14 from telemetry.testing import options_for_unittests | 15 from telemetry.testing import options_for_unittests |
15 from telemetry.testing import serially_executed_browser_test_case | 16 from telemetry.testing import serially_executed_browser_test_case |
16 | 17 |
17 | 18 |
18 def ProcessCommandLineOptions(test_class, args): | 19 def ProcessCommandLineOptions(test_class, project_config, args): |
19 options = browser_options.BrowserFinderOptions() | 20 options = browser_options.BrowserFinderOptions() |
20 options.browser_type = 'any' | 21 options.browser_type = 'any' |
21 parser = options.CreateParser(test_class.__doc__) | 22 parser = options.CreateParser(test_class.__doc__) |
22 test_class.AddCommandlineArgs(parser) | 23 test_class.AddCommandlineArgs(parser) |
| 24 # Set the default chrome root variable. This is required for the |
| 25 # Android browser finder to function properly. |
| 26 parser.set_defaults(chrome_root=project_config.default_chrome_root) |
23 finder_options, positional_args = parser.parse_args(args) | 27 finder_options, positional_args = parser.parse_args(args) |
24 finder_options.positional_args = positional_args | 28 finder_options.positional_args = positional_args |
25 options_for_unittests.Push(finder_options) | 29 options_for_unittests.Push(finder_options) |
26 return finder_options | 30 return finder_options |
27 | 31 |
28 | 32 |
29 def ValidateDistinctNames(browser_test_classes): | 33 def _ValidateDistinctNames(browser_test_classes): |
30 names_to_test_classes = {} | 34 names_to_test_classes = {} |
31 for cl in browser_test_classes: | 35 for cl in browser_test_classes: |
32 name = cl.Name() | 36 name = cl.Name() |
33 if name in names_to_test_classes: | 37 if name in names_to_test_classes: |
34 raise Exception('Test name %s is duplicated between %s and %s' % ( | 38 raise Exception('Test name %s is duplicated between %s and %s' % ( |
35 name, repr(cl), repr(names_to_test_classes[name]))) | 39 name, repr(cl), repr(names_to_test_classes[name]))) |
36 names_to_test_classes[name] = cl | 40 names_to_test_classes[name] = cl |
37 | 41 |
38 | 42 |
39 def GenerateTestMethod(based_method, args): | 43 def _GenerateTestMethod(based_method, args): |
40 return lambda self: based_method(self, *args) | 44 return lambda self: based_method(self, *args) |
41 | 45 |
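The factory above exists so each generated method captures its own args tuple; building the lambdas inline in a loop would make every test close over the final loop value. A minimal sketch of the difference (names hypothetical, Python 2 semantics):

    # Inline lambdas in a loop all share the loop variable 'm'.
    bad = [lambda self: m for m in range(3)]
    assert bad[0](None) == 2  # all three see m == 2
    # The factory freezes each args tuple when it is called.
    good = [_GenerateTestMethod(lambda self, v: v, (m,)) for m in range(3)]
    assert [f(None) for f in good] == [0, 1, 2]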
42 | 46 |
43 _INVALID_TEST_NAME_RE = re.compile(r'[^a-zA-Z0-9_]') | 47 _INVALID_TEST_NAME_RE = re.compile(r'[^a-zA-Z0-9_]') |
44 def ValidateTestMethodname(test_name): | 48 def _ValidateTestMethodname(test_name): |
45 assert not bool(_INVALID_TEST_NAME_RE.search(test_name)) | 49 assert not bool(_INVALID_TEST_NAME_RE.search(test_name)) |
46 | 50 |
47 | 51 |
48 def TestRangeForShard(total_shards, shard_index, num_tests): | 52 def _TestRangeForShard(total_shards, shard_index, num_tests): |
49 """Returns a 2-tuple containing the start (inclusive) and ending | 53 """Returns a 2-tuple containing the start (inclusive) and ending |
50 (exclusive) indices of the tests that should be run, given that | 54 (exclusive) indices of the tests that should be run, given that |
51 |num_tests| tests are split across |total_shards| shards, and that | 55 |num_tests| tests are split across |total_shards| shards, and that |
52 |shard_index| is currently being run. | 56 |shard_index| is currently being run. |
53 """ | 57 """ |
54 assert num_tests >= 0 | 58 assert num_tests >= 0 |
55 assert total_shards >= 1 | 59 assert total_shards >= 1 |
56 assert shard_index >= 0 and shard_index < total_shards, ( | 60 assert shard_index >= 0 and shard_index < total_shards, ( |
57 'shard_index (%d) must be >= 0 and < total_shards (%d)' % | 61 'shard_index (%d) must be >= 0 and < total_shards (%d)' % |
58 (shard_index, total_shards)) | 62 (shard_index, total_shards)) |
(...skipping 11 matching lines...) |
70 0, shard_index - num_earlier_shards_with_one_extra_test) | 74 0, shard_index - num_earlier_shards_with_one_extra_test) |
71 num_earlier_tests = ( | 75 num_earlier_tests = ( |
72 num_earlier_shards_with_one_extra_test * (floored_tests_per_shard + 1) + | 76 num_earlier_shards_with_one_extra_test * (floored_tests_per_shard + 1) + |
73 num_earlier_shards_with_no_extra_tests * floored_tests_per_shard) | 77 num_earlier_shards_with_no_extra_tests * floored_tests_per_shard) |
74 tests_for_this_shard = floored_tests_per_shard | 78 tests_for_this_shard = floored_tests_per_shard |
75 if shard_index < remaining_tests: | 79 if shard_index < remaining_tests: |
76 tests_for_this_shard += 1 | 80 tests_for_this_shard += 1 |
77 return (num_earlier_tests, num_earlier_tests + tests_for_this_shard) | 81 return (num_earlier_tests, num_earlier_tests + tests_for_this_shard) |
78 | 82 |
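A quick worked example of the range computation (invented numbers): 10 tests across 4 shards gives a floored share of 2 with 2 tests left over, so the first two shards each take one extra test.

    for shard_index in xrange(4):
      print shard_index, _TestRangeForShard(4, shard_index, 10)
    # 0 (0, 3)
    # 1 (3, 6)
    # 2 (6, 8)
    # 3 (8, 10)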
79 | 83 |
| 84 def _MedianTestTime(test_times): |
| 85 times = test_times.values() |
| 86 times.sort() |
| 87 if len(times) == 0: |
| 88 return 0 |
| 89 halfLen = len(times) / 2 |
| 90 if len(times) % 2: |
| 91 return times[halfLen] |
| 92 else: |
| 93 return 0.5 * (times[halfLen - 1] + times[halfLen]) |
| 94 |
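A few sanity checks for the median helper, with made-up timings (this relies on Python 2 behavior: dict.values() returns a list and / floors for ints):

    assert _MedianTestTime({}) == 0
    assert _MedianTestTime({'a': 1.0, 'b': 9.0, 'c': 2.0}) == 2.0  # odd count
    assert _MedianTestTime({'a': 1.0, 'b': 2.0,
                            'c': 3.0, 'd': 10.0}) == 2.5           # even count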
| 95 |
| 96 def _TestTime(test, test_times, default_test_time): |
| 97 return test_times.get(test.shortName()) or default_test_time |
| 98 |
| 99 |
| 100 def _DebugShardDistributions(shards, test_times): |
| 101 for i, s in enumerate(shards): |
| 102 num_tests = len(s) |
| 103 if test_times: |
| 104 median = _MedianTestTime(test_times) |
| 105 shard_time = 0.0 |
| 106 for t in s: |
| 107 shard_time += _TestTime(t, test_times, median) |
| 108 print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests) |
| 109 else: |
| 110 print 'shard %d: %d tests (unknown duration)' % (i, num_tests) |
| 111 |
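With timing data available, the debug helper above prints one line per shard; with made-up numbers the output would read:

    shard 0: 530 seconds (41 tests)
    shard 1: 531 seconds (40 tests)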
| 112 |
| 113 def _SplitShardsByTime(test_cases, total_shards, test_times, |
| 114 debug_shard_distributions): |
| 115 median = _MedianTestTime(test_times) |
| 116 shards = [] |
| 117 for i in xrange(total_shards): |
| 118 shards.append({'total_time': 0.0, 'tests': []}) |
| 119 test_cases.sort(key=lambda t: _TestTime(t, test_times, median), |
| 120 reverse=True) |
| 121 |
| 122 # The greedy algorithm has been empirically tested on the WebGL 2.0 |
| 123 # conformance tests' times, and results in an essentially perfect |
| 124 # shard distribution of 530 seconds per shard. In the same scenario, |
| 125 # round-robin scheduling resulted in shard times spread between 502 |
| 126 # and 592 seconds, and the current alphabetical sharding resulted in |
| 127 # shard times spread between 44 and 1591 seconds. |
| 128 |
| 129 # Greedy scheduling. O(m*n), where m is the number of shards and n |
| 130 # is the number of test cases. |
| 131 for t in test_cases: |
| 132 min_shard_index = 0 |
| 133 min_shard_time = None |
| 134 for i in xrange(total_shards): |
| 135 if min_shard_time is None or shards[i]['total_time'] < min_shard_time: |
| 136 min_shard_index = i |
| 137 min_shard_time = shards[i]['total_time'] |
| 138 shards[min_shard_index]['tests'].append(t) |
| 139 shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median) |
| 140 |
| 141 res = [s['tests'] for s in shards] |
| 142 if debug_shard_distributions: |
| 143 _DebugShardDistributions(res, test_times) |
| 144 |
| 145 return res |
| 146 |
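To illustrate the greedy assignment with invented durations: tests are taken longest-first and each is placed on the currently lightest shard, which keeps the per-shard totals close. A self-contained sketch (FakeTest is a stand-in for a real test case):

    class FakeTest(object):
      def __init__(self, name):
        self._name = name
      def shortName(self):
        return self._name

    times = {'t1': 8.0, 't2': 7.0, 't3': 3.0, 't4': 2.0, 't5': 1.0}
    cases = [FakeTest(n) for n in times]
    shards = _SplitShardsByTime(cases, 2, times, False)
    # Longest-first order t1..t5 yields shard totals 11.0 and 10.0:
    # shard 0 gets [t1, t4, t5], shard 1 gets [t2, t3].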
| 147 |
80 _TEST_GENERATOR_PREFIX = 'GenerateTestCases_' | 148 _TEST_GENERATOR_PREFIX = 'GenerateTestCases_' |
81 | 149 |
82 def LoadTests(test_class, finder_options, filter_regex_str, | 150 def _LoadTests(test_class, finder_options, filter_regex_str, |
83 total_shards, shard_index): | 151 filter_tests_after_sharding, |
| 152 total_shards, shard_index, test_times, |
| 153 debug_shard_distributions): |
84 test_cases = [] | 154 test_cases = [] |
85 filter_regex = re.compile(filter_regex_str) | 155 real_regex = re.compile(filter_regex_str) |
| 156 noop_regex = re.compile('') |
| 157 if filter_tests_after_sharding: |
| 158 filter_regex = noop_regex |
| 159 post_filter_regex = real_regex |
| 160 else: |
| 161 filter_regex = real_regex |
| 162 post_filter_regex = noop_regex |
86 for name, method in inspect.getmembers( | 163 for name, method in inspect.getmembers( |
87 test_class, predicate=inspect.ismethod): | 164 test_class, predicate=inspect.ismethod): |
88 if name.startswith('test'): | 165 if name.startswith('test'): |
89 # Do not allow method names starting with "test" in these | 166 # Do not allow method names starting with "test" in these |
90 # subclasses, to avoid collisions with Python's unit test runner. | 167 # subclasses, to avoid collisions with Python's unit test runner. |
91 raise Exception('Name collision with Python\'s unittest runner: %s' % | 168 raise Exception('Name collision with Python\'s unittest runner: %s' % |
92 name) | 169 name) |
93 elif name.startswith('Test') and filter_regex.search(name): | 170 elif name.startswith('Test') and filter_regex.search(name): |
94 # Pass these through for the time being. We may want to rethink | 171 # Pass these through for the time being. We may want to rethink |
95 # how they are handled in the future. | 172 # how they are handled in the future. |
96 test_cases.append(test_class(name)) | 173 test_cases.append(test_class(name)) |
97 elif name.startswith(_TEST_GENERATOR_PREFIX): | 174 elif name.startswith(_TEST_GENERATOR_PREFIX): |
98 based_method_name = name[len(_TEST_GENERATOR_PREFIX):] | 175 based_method_name = name[len(_TEST_GENERATOR_PREFIX):] |
99 assert hasattr(test_class, based_method_name), ( | 176 assert hasattr(test_class, based_method_name), ( |
100 '%s is specified but based method %s does not exist' % | 177 '%s is specified but based method %s does not exist' % |
101 (name, based_method_name)) | 178 (name, based_method_name)) |
102 based_method = getattr(test_class, based_method_name) | 179 based_method = getattr(test_class, based_method_name) |
103 for generated_test_name, args in method(finder_options): | 180 for generated_test_name, args in method(finder_options): |
104 ValidateTestMethodname(generated_test_name) | 181 _ValidateTestMethodname(generated_test_name) |
105 if filter_regex.search(generated_test_name): | 182 if filter_regex.search(generated_test_name): |
106 setattr(test_class, generated_test_name, GenerateTestMethod( | 183 setattr(test_class, generated_test_name, _GenerateTestMethod( |
107 based_method, args)) | 184 based_method, args)) |
108 test_cases.append(test_class(generated_test_name)) | 185 test_cases.append(test_class(generated_test_name)) |
109 test_cases.sort(key=lambda t: t.id()) | 186 if test_times: |
110 test_range = TestRangeForShard(total_shards, shard_index, len(test_cases)) | 187 # Assign tests to shards. |
111 return test_cases[test_range[0]:test_range[1]] | 188 shards = _SplitShardsByTime(test_cases, total_shards, test_times, |
| 189 debug_shard_distributions) |
| 190 return [t for t in shards[shard_index] |
| 191 if post_filter_regex.search(t.shortName())] |
| 192 else: |
| 193 test_cases.sort(key=lambda t: t.shortName()) |
| 194 test_range = _TestRangeForShard(total_shards, shard_index, len(test_cases)) |
| 195 if debug_shard_distributions: |
| 196 tmp_shards = [] |
| 197 for i in xrange(total_shards): |
| 198 tmp_range = _TestRangeForShard(total_shards, i, len(test_cases)) |
| 199 tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]]) |
| 200 # Can edit the code to get 'test_times' passed in here for |
| 201 # debugging and comparison purposes. |
| 202 _DebugShardDistributions(tmp_shards, None) |
| 203 return [t for t in test_cases[test_range[0]:test_range[1]] |
| 204 if post_filter_regex.search(t.shortName())] |
112 | 205 |
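For context, the GenerateTestCases_ hook consumed above turns one parameterized method into many named tests. A minimal, hypothetical suite using it might look like:

    class ExampleTest(
        serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):

      # The based method; generated tests call it with the yielded args.
      def LoadPage(self, url):
        assert url.startswith('http')

      @classmethod
      def GenerateTestCases_LoadPage(cls, finder_options):
        # Each (name, args) pair becomes a test method; names may only
        # contain [a-zA-Z0-9_], per _ValidateTestMethodname.
        for i, url in enumerate(['http://a.test', 'http://b.test']):
          yield 'LoadPage_%d' % i, (url,)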
113 | 206 |
114 class TestRunOptions(object): | 207 class TestRunOptions(object): |
115 def __init__(self): | 208 def __init__(self): |
116 self.verbosity = 2 | 209 self.verbosity = 2 |
117 | 210 |
118 | 211 |
119 class BrowserTestResult(unittest.TextTestResult): | 212 class BrowserTestResult(unittest.TextTestResult): |
120 def __init__(self, *args, **kwargs): | 213 def __init__(self, *args, **kwargs): |
121 super(BrowserTestResult, self).__init__(*args, **kwargs) | 214 super(BrowserTestResult, self).__init__(*args, **kwargs) |
122 self.successes = [] | 215 self.successes = [] |
| 216 self.times = {} |
| 217 self._current_test_start_time = 0 |
123 | 218 |
124 def addSuccess(self, test): | 219 def addSuccess(self, test): |
125 super(BrowserTestResult, self).addSuccess(test) | 220 super(BrowserTestResult, self).addSuccess(test) |
126 self.successes.append(test) | 221 self.successes.append(test) |
127 | 222 |
| 223 def startTest(self, test): |
| 224 super(BrowserTestResult, self).startTest(test) |
| 225 self._current_test_start_time = time.time() |
| 226 |
| 227 def stopTest(self, test): |
| 228 super(BrowserTestResult, self).stopTest(test) |
| 229 self.times[test.shortName()] = (time.time() - self._current_test_start_time) |
| 230 |
128 | 231 |
129 def Run(project_config, test_run_options, args): | 232 def Run(project_config, test_run_options, args): |
130 binary_manager.InitDependencyManager(project_config.client_configs) | 233 binary_manager.InitDependencyManager(project_config.client_configs) |
131 parser = argparse.ArgumentParser(description='Run a browser test suite') | 234 parser = argparse.ArgumentParser(description='Run a browser test suite') |
132 parser.add_argument('test', type=str, help='Name of the test suite to run') | 235 parser.add_argument('test', type=str, help='Name of the test suite to run') |
133 parser.add_argument( | 236 parser.add_argument( |
134 '--write-abbreviated-json-results-to', metavar='FILENAME', action='store', | 237 '--write-abbreviated-json-results-to', metavar='FILENAME', action='store', |
135 help=('If specified, writes abbreviated results to that path in JSON form.')) | 238 help=('If specified, writes abbreviated results to that path in JSON form.')) |
136 parser.add_argument('--test-filter', type=str, default='', action='store', | 239 parser.add_argument('--test-filter', type=str, default='', action='store', |
137 help='Run only tests whose names match the given filter regexp.') | 240 help='Run only tests whose names match the given filter regexp.') |
138 parser.add_argument('--total-shards', default=1, type=int, | 241 parser.add_argument('--total-shards', default=1, type=int, |
139 help='Total number of shards being used for this test run. (The user of ' | 242 help='Total number of shards being used for this test run. (The user of ' |
140 'this script is responsible for spawning all of the shards.)') | 243 'this script is responsible for spawning all of the shards.)') |
141 parser.add_argument('--shard-index', default=0, type=int, | 244 parser.add_argument('--shard-index', default=0, type=int, |
142 help='Shard index (0..total_shards-1) of this test run.') | 245 help='Shard index (0..total_shards-1) of this test run.') |
| 246 parser.add_argument( |
| 247 '--filter-tests-after-sharding', default=False, action='store_true', |
| 248 help=('Apply the test filter after tests are split for sharding. Useful ' |
| 249 'for reproducing bugs related to the order in which tests run.')) |
| 250 parser.add_argument( |
| 251 '--read-abbreviated-json-results-from', metavar='FILENAME', |
| 252 action='store', help=( |
| 253 'If specified, reads abbreviated results from that path in json form. ' |
| 254 'The file format is that written by ' |
| 255 '--write-abbreviated-json-results-to. This information is used to more ' |
| 256 'evenly distribute tests among shards.')) |
| 257 parser.add_argument('--debug-shard-distributions', |
| 258 action='store_true', default=False, |
| 259 help='Print debugging information about the shards\' test distributions') |
| 260 |
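The abbreviated results file exchanged by these two flags is the one assembled at the end of Run(); judging from the keys written there, it has roughly this shape (example values invented):

    {
      "failures": ["SomeTest_1"],
      "successes": ["SomeTest_0"],
      "times": {"SomeTest_0": 1.2, "SomeTest_1": 0.4},
      "valid": true
    }

Only the "times" map is read back (via abbr_results.get('times')) when redistributing tests across shards.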
143 option, extra_args = parser.parse_known_args(args) | 261 option, extra_args = parser.parse_known_args(args) |
144 | 262 |
145 for start_dir in project_config.start_dirs: | 263 for start_dir in project_config.start_dirs: |
146 modules_to_classes = discover.DiscoverClasses( | 264 modules_to_classes = discover.DiscoverClasses( |
147 start_dir, project_config.top_level_dir, | 265 start_dir, project_config.top_level_dir, |
148 base_class=serially_executed_browser_test_case.SeriallyBrowserTestCase) | 266 base_class=serially_executed_browser_test_case. |
| 267 SeriallyExecutedBrowserTestCase) |
149 browser_test_classes = modules_to_classes.values() | 268 browser_test_classes = modules_to_classes.values() |
150 | 269 |
151 ValidateDistinctNames(browser_test_classes) | 270 _ValidateDistinctNames(browser_test_classes) |
152 | 271 |
153 test_class = None | 272 test_class = None |
154 for cl in browser_test_classes: | 273 for cl in browser_test_classes: |
155 if cl.Name() == option.test: | 274 if cl.Name() == option.test: |
156 test_class = cl | 275 test_class = cl |
157 break | 276 break |
158 | 277 |
159 if not test_class: | 278 if not test_class: |
160 print 'Cannot find test class with name matching %s' % option.test | 279 print 'Cannot find test class with name matching %s' % option.test |
161 print 'Available tests: %s' % '\n'.join( | 280 print 'Available tests: %s' % '\n'.join( |
162 cl.Name() for cl in browser_test_classes) | 281 cl.Name() for cl in browser_test_classes) |
163 return 1 | 282 return 1 |
164 | 283 |
165 options = ProcessCommandLineOptions(test_class, extra_args) | 284 options = ProcessCommandLineOptions(test_class, project_config, extra_args) |
| 285 |
| 286 test_times = None |
| 287 if option.read_abbreviated_json_results_from: |
| 288 with open(option.read_abbreviated_json_results_from, 'r') as f: |
| 289 abbr_results = json.load(f) |
| 290 test_times = abbr_results.get('times') |
166 | 291 |
167 suite = unittest.TestSuite() | 292 suite = unittest.TestSuite() |
168 for test in LoadTests(test_class, options, option.test_filter, | 293 for test in _LoadTests(test_class, options, option.test_filter, |
169 option.total_shards, option.shard_index): | 294 option.filter_tests_after_sharding, |
| 295 option.total_shards, option.shard_index, |
| 296 test_times, option.debug_shard_distributions): |
170 suite.addTest(test) | 297 suite.addTest(test) |
171 | 298 |
172 results = unittest.TextTestRunner( | 299 results = unittest.TextTestRunner( |
173 verbosity=test_run_options.verbosity, | 300 verbosity=test_run_options.verbosity, |
174 resultclass=BrowserTestResult).run(suite) | 301 resultclass=BrowserTestResult).run(suite) |
175 if option.write_abbreviated_json_results_to: | 302 if option.write_abbreviated_json_results_to: |
176 with open(option.write_abbreviated_json_results_to, 'w') as f: | 303 with open(option.write_abbreviated_json_results_to, 'w') as f: |
177 json_results = {'failures': [], 'successes': [], 'valid': True} | 304 json_results = {'failures': [], 'successes': [], |
| 305 'times': {}, 'valid': True} |
178 # Treat failures and errors identically in the JSON | 306 # Treat failures and errors identically in the JSON |
179 # output. Failures are those which cooperatively fail using | 307 # output. Failures are those which cooperatively fail using |
180 # Python's unittest APIs; errors are those which abort the test | 308 # Python's unittest APIs; errors are those which abort the test |
181 # case early with an exception. | 309 # case early with an exception. |
182 failures = [] | 310 failures = [] |
183 failures.extend(results.failures) | 311 for fail, _ in results.failures + results.errors: |
184 failures.extend(results.errors) | 312 # When an error is thrown in an individual test method, setUp or |
185 failures.sort(key=lambda entry: entry[0].id()) | 313 # tearDown, fail is an instance of unittest.TestCase. |
186 for (failed_test_case, _) in failures: | 314 if isinstance(fail, unittest.TestCase): |
187 json_results['failures'].append(failed_test_case.id()) | 315 failures.append(fail.shortName()) |
| 316 else: |
| 317 # When an error is thrown in setUpClass or tearDownClass, an instance of |
| 318 # _ErrorHolder is placed in the results.errors list. We use its id() |
| 319 # as the failure name in this case since shortName() is not available. |
| 320 failures.append(fail.id()) |
| 321 failures = sorted(failures) |
| 322 for failure_id in failures: |
| 323 json_results['failures'].append(failure_id) |
188 for passed_test_case in results.successes: | 324 for passed_test_case in results.successes: |
189 json_results['successes'].append(passed_test_case.id()) | 325 json_results['successes'].append(passed_test_case.shortName()) |
| 326 json_results['times'].update(results.times) |
190 json.dump(json_results, f) | 327 json.dump(json_results, f) |
191 return len(results.failures + results.errors) | 328 return len(results.failures + results.errors) |
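Putting it together, a sharded run that records timings for later rebalancing might be launched like this (the entry-point script and suite names are hypothetical; the flags are the ones defined above):

    # Shard 0 of 4; repeat on other machines with --shard-index=1..3.
    python run_browser_tests.py example_suite \
        --total-shards=4 --shard-index=0 \
        --write-abbreviated-json-results-to=shard0_results.json
    # Later runs can pass merged timing data back in with
    #   --read-abbreviated-json-results-from=all_results.json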