# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
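
"""Harness for running serially executed browser tests through typ.

Discovers test classes derived from SeriallyExecutedBrowserTestCase, shards
their test cases, and drives them with the typ runner.
"""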

import fnmatch
import json
import re
import sys

from telemetry.core import discover
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import android_device
from telemetry.internal.util import binary_manager
from telemetry.testing import browser_test_context
from telemetry.testing import serially_executed_browser_test_case

import typ
from typ import arg_parser

DEFAULT_LOG_FORMAT = (
    '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
    '%(message)s')


TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']


def ProcessCommandLineOptions(test_class, default_chrome_root, args):
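  """Parses |args| into browser finder options for |test_class|.

  Returns the parsed finder options, with any leftover positional arguments
  attached as |finder_options.positional_args|.
  """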
  options = browser_options.BrowserFinderOptions()
  options.browser_type = 'any'
  parser = options.CreateParser(test_class.__doc__)
  test_class.AddCommandlineArgs(parser)
  # Set the default chrome root variable. This is required for the
  # Android browser finder to function properly.
  if default_chrome_root:
    parser.set_defaults(chrome_root=default_chrome_root)
  finder_options, positional_args = parser.parse_args(args)
  finder_options.positional_args = positional_args
  return finder_options


def _ValidateDistinctNames(browser_test_classes):
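  """Raises an exception if two test classes share the same Name()."""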
  names_to_test_classes = {}
  for cl in browser_test_classes:
    name = cl.Name()
    if name in names_to_test_classes:
      raise Exception('Test name %s is duplicated between %s and %s' % (
          name, repr(cl), repr(names_to_test_classes[name])))
    names_to_test_classes[name] = cl


def _TestRangeForShard(total_shards, shard_index, num_tests):
  """Returns a 2-tuple containing the start (inclusive) and end (exclusive)
  indices of the tests that should be run, given that |num_tests| tests are
  split across |total_shards| shards, and that the shard with index
  |shard_index| is the one being run.
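
  For example, splitting 10 tests across 3 shards yields the ranges
  (0, 4), (4, 7) and (7, 10) for shard indices 0, 1 and 2.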
| 57 """ |
| 58 assert num_tests >= 0 |
| 59 assert total_shards >= 1 |
| 60 assert shard_index >= 0 and shard_index < total_shards, ( |
| 61 'shard_index (%d) must be >= 0 and < total_shards (%d)' % |
| 62 (shard_index, total_shards)) |
  if num_tests == 0:
    return (0, 0)
  floored_tests_per_shard = num_tests // total_shards
  remaining_tests = num_tests % total_shards
  if remaining_tests == 0:
    return (floored_tests_per_shard * shard_index,
            floored_tests_per_shard * (1 + shard_index))
  # More complicated. Some shards will run floored_tests_per_shard tests,
  # and some will run 1 + floored_tests_per_shard.
  num_earlier_shards_with_one_extra_test = min(remaining_tests, shard_index)
  num_earlier_shards_with_no_extra_tests = max(
      0, shard_index - num_earlier_shards_with_one_extra_test)
  num_earlier_tests = (
      num_earlier_shards_with_one_extra_test * (floored_tests_per_shard + 1) +
      num_earlier_shards_with_no_extra_tests * floored_tests_per_shard)
  tests_for_this_shard = floored_tests_per_shard
  if shard_index < remaining_tests:
    tests_for_this_shard += 1
  return (num_earlier_tests, num_earlier_tests + tests_for_this_shard)


def _MedianTestTime(test_times):
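  """Returns the median recorded test time, or 0 if |test_times| is empty."""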
  times = test_times.values()
  times.sort()
  if len(times) == 0:
    return 0
  half_len = len(times) // 2
  if len(times) % 2:
    return times[half_len]
  else:
    return 0.5 * (times[half_len - 1] + times[half_len])


def _TestTime(test, test_times, default_test_time):
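  """Returns the recorded time for |test|, falling back to
  |default_test_time| when the test has no (or a zero) recorded time.
  """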
  return test_times.get(test.shortName()) or default_test_time


def _DebugShardDistributions(shards, test_times):
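  """Prints the number of tests (and estimated duration) in each shard."""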
  for i, s in enumerate(shards):
    num_tests = len(s)
    if test_times:
      median = _MedianTestTime(test_times)
      shard_time = 0.0
      for t in s:
        shard_time += _TestTime(t, test_times, median)
      print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests)
    else:
      print 'shard %d: %d tests (unknown duration)' % (i, num_tests)


def _SplitShardsByTime(test_cases, total_shards, test_times,
                       debug_shard_distributions):
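  """Greedily assigns |test_cases| to |total_shards| shards so that the
  total expected runtime of each shard is as even as possible.

  Tests without a recorded time are assumed to take the median time.
  """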
  median = _MedianTestTime(test_times)
  shards = []
  for i in xrange(total_shards):
    shards.append({'total_time': 0.0, 'tests': []})
  test_cases.sort(key=lambda t: _TestTime(t, test_times, median),
                  reverse=True)

  # The greedy algorithm has been empirically tested on the WebGL 2.0
  # conformance tests' times, and results in an essentially perfect
  # shard distribution of 530 seconds per shard. In the same scenario,
  # round-robin scheduling resulted in shard times spread between 502
  # and 592 seconds, and the current alphabetical sharding resulted in
  # shard times spread between 44 and 1591 seconds.

  # Greedy scheduling. O(m*n), where m is the number of shards and n
  # is the number of test cases.
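  # For example, five tests with times [5, 3, 3, 2, 1] split across two
  # shards yield loads of 7 (5+2) and 7 (3+3+1): each test goes to
  # whichever shard currently has the smallest total time.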
  for t in test_cases:
    min_shard_index = 0
    min_shard_time = None
    for i in xrange(total_shards):
      if min_shard_time is None or shards[i]['total_time'] < min_shard_time:
        min_shard_index = i
        min_shard_time = shards[i]['total_time']
    shards[min_shard_index]['tests'].append(t)
    shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)

  res = [s['tests'] for s in shards]
  if debug_shard_distributions:
    _DebugShardDistributions(res, test_times)

  return res


def LoadTestCasesToBeRun(
    test_class, finder_options, filter_regex_str, filter_tests_after_sharding,
    total_shards, shard_index, test_times, debug_shard_distributions):
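  """Generates the test cases for |test_class|, filters them against
  |filter_regex_str|, and returns the list assigned to shard |shard_index|.

  If |test_times| is given, tests are distributed across shards by expected
  duration; otherwise they are sharded in alphabetical order. When
  |filter_tests_after_sharding| is true, the regex filter is applied after
  shard assignment rather than before it.
  """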
  test_cases = []
  real_regex = re.compile(filter_regex_str)
  noop_regex = re.compile('')
  if filter_tests_after_sharding:
    filter_regex = noop_regex
    post_filter_regex = real_regex
  else:
    filter_regex = real_regex
    post_filter_regex = noop_regex

  for t in serially_executed_browser_test_case.GenerateTestCases(
      test_class, finder_options):
    if filter_regex.search(t.shortName()):
      test_cases.append(t)

  if test_times:
    # Assign tests to shards.
    shards = _SplitShardsByTime(test_cases, total_shards, test_times,
                                debug_shard_distributions)
    return [t for t in shards[shard_index]
            if post_filter_regex.search(t.shortName())]
  else:
    test_cases.sort(key=lambda t: t.shortName())
    test_range = _TestRangeForShard(total_shards, shard_index, len(test_cases))
    if debug_shard_distributions:
      tmp_shards = []
      for i in xrange(total_shards):
        tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
        tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
      # The code can be edited to pass 'test_times' in here for debugging
      # and comparison purposes.
      _DebugShardDistributions(tmp_shards, None)
    return [t for t in test_cases[test_range[0]:test_range[1]]
            if post_filter_regex.search(t.shortName())]


def _CreateTestArgParsers():
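  """Creates the typ argument parser and adds this harness's own flags."""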
  parser = typ.ArgumentParser(discovery=False, reporting=True, running=True)
  parser.add_argument('test', type=str, help='Name of the test suite to run')
  parser.add_argument('--test-filter', type=str, default='', action='store',
      help='Run only tests whose names match the given filter regexp.')
  parser.add_argument(
      '--filter-tests-after-sharding', default=False, action='store_true',
      help=('Apply the test filter after tests are split for sharding. Useful '
            'for reproducing bugs related to the order in which tests run.'))
  parser.add_argument(
      '--read-abbreviated-json-results-from', metavar='FILENAME',
      action='store', help=(
          'If specified, reads abbreviated results from that path in json '
          'form. This information is used to more evenly distribute tests '
          'among shards.'))
  parser.add_argument('--debug-shard-distributions',
      action='store_true', default=False,
      help='Print debugging information about the shards\' test '
           'distributions.')

  parser.add_argument('--default-chrome-root', type=str, default=None)
  parser.add_argument('--client-config', dest='client_configs',
                      action='append', default=[])
  parser.add_argument('--start-dir', dest='start_dirs',
                      action='append', default=[])
  parser.add_argument('--skip', metavar='glob', default=[],
                      action='append',
                      help=('Globs of test names to skip (defaults to '
                            '%(default)s).'))
  return parser


def _SkipMatch(name, skip_globs):
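  """Returns True if |name| matches any glob in |skip_globs|."""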
  return any(fnmatch.fnmatch(name, glob) for glob in skip_globs)


def _GetClassifier(args):
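  """Returns a typ classifier for serially executed browser test cases."""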
  def _SeriallyExecutedBrowserTestCaseClassifier(test_set, test):
    # Do not pick up tests that do not inherit from
    # serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase.
    if not isinstance(test,
        serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
      return
    name = test.id()
    if _SkipMatch(name, args.skip):
      test_set.tests_to_skip.append(
          typ.TestInput(name, 'skipped because matched --skip'))
      return
    # For now, only support running these tests serially.
    test_set.isolated_tests.append(typ.TestInput(name))
  return _SeriallyExecutedBrowserTestCaseClassifier


def RunTests(args):
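  """Parses |args|, discovers the requested test class, shards its test
  cases, and runs them through the typ runner.

  Returns an exit code: 0 on success, non-zero on failure.
  """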
  parser = _CreateTestArgParsers()
  try:
    options, extra_args = parser.parse_known_args(args)
  except arg_parser._Bailout:
    return parser.exit_status
  binary_manager.InitDependencyManager(options.client_configs)

  # Accumulate test classes from all start directories.
  browser_test_classes = []
  for start_dir in options.start_dirs:
    modules_to_classes = discover.DiscoverClasses(
        start_dir, options.top_level_dir,
        base_class=serially_executed_browser_test_case.
            SeriallyExecutedBrowserTestCase)
    browser_test_classes.extend(modules_to_classes.values())

  _ValidateDistinctNames(browser_test_classes)

  test_class = None
  for cl in browser_test_classes:
    if cl.Name() == options.test:
      test_class = cl
      break

  if not test_class:
    print 'Cannot find test class with name matching %s' % options.test
    print 'Available tests: %s' % '\n'.join(
        cl.Name() for cl in browser_test_classes)
    return 1

  # Create test context.
  context = browser_test_context.TypTestContext()
  for c in options.client_configs:
    context.client_configs.append(c)
  context.finder_options = ProcessCommandLineOptions(
      test_class, options.default_chrome_root, extra_args)
  context.test_class = test_class
  test_times = None
  if options.read_abbreviated_json_results_from:
    with open(options.read_abbreviated_json_results_from, 'r') as f:
      abbr_results = json.load(f)
      test_times = abbr_results.get('times')
  tests_to_run = LoadTestCasesToBeRun(
      test_class=test_class, finder_options=context.finder_options,
      filter_regex_str=options.test_filter,
      filter_tests_after_sharding=options.filter_tests_after_sharding,
      total_shards=options.total_shards, shard_index=options.shard_index,
      test_times=test_times,
      debug_shard_distributions=options.debug_shard_distributions)
  for t in tests_to_run:
    context.test_case_ids_to_run.add(t.id())
  context.Freeze()
  browser_test_context._global_test_context = context

  # Set up the typ runner.
  runner = typ.Runner()

  runner.context = context
  runner.setup_fn = _SetUpProcess
  runner.teardown_fn = _TearDownProcess

  runner.args.jobs = options.jobs
  runner.args.metadata = options.metadata
  runner.args.passthrough = options.passthrough
  runner.args.path = options.path
  runner.args.retry_limit = options.retry_limit
  runner.args.test_results_server = options.test_results_server
  runner.args.test_type = options.test_type
  runner.args.top_level_dir = options.top_level_dir
  runner.args.write_full_results_to = options.write_full_results_to
  runner.args.write_trace_to = options.write_trace_to
  runner.args.list_only = options.list_only
  runner.classifier = _GetClassifier(options)

  runner.args.suffixes = TEST_SUFFIXES

  # Sharding is handled by this harness: the test cases to run are passed to
  # each subprocess via browser_test_context.test_case_ids_to_run, so typ's
  # own sharding logic is explicitly disabled.
  runner.args.total_shards = 1
  runner.args.shard_index = 0

  runner.args.timing = True
  runner.args.verbose = options.verbose
  runner.win_multiprocessing = typ.WinMultiprocessing.importable
  try:
    ret, _, _ = runner.run()
  except KeyboardInterrupt:
    print >> sys.stderr, 'interrupted, exiting'
    ret = 130
  return ret


def _SetUpProcess(child, context):
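  """Per-worker setup hook for typ; runs in each worker process."""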
  args = context.finder_options
  if binary_manager.NeedsInit():
    # On Windows, typ does not preserve the DependencyManager initialization
    # in the child processes.
    binary_manager.InitDependencyManager(context.client_configs)
  if args.remote_platform_options.device == 'android':
    android_devices = android_device.FindAllAvailableDevices(args)
    if not android_devices:
      raise RuntimeError('No Android device found')
    android_devices.sort(key=lambda device: device.name)
    # Assign one device per worker; |child| must not be deleted above, since
    # its worker_num is needed here.
    args.remote_platform_options.device = (
        android_devices[child.worker_num - 1].guid)
  browser_test_context._global_test_context = context
  context.test_class.SetUpProcess()


def _TearDownProcess(child, context):
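  """Per-worker teardown hook for typ; runs after the worker's tests."""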
  del child, context  # Unused.
  browser_test_context._global_test_context.test_class.TearDownProcess()
  browser_test_context._global_test_context = None


if __name__ == '__main__':
  ret_code = RunTests(sys.argv[1:])
  sys.exit(ret_code)