Chromium Code Reviews

Unified Diff: telemetry/telemetry/testing/run_browser_tests.py

Issue 2700563004: [Telemetry] Migrate browser_test_runner to use typ as the test runner (Closed)
Patch Set: Created 3 years, 10 months ago
 # Copyright 2016 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import argparse
+import fnmatch
+import re
+import sys
 import json
-import logging
-import re
-import time
-import unittest
 
 from telemetry.core import discover
 from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import android_device
 from telemetry.internal.util import binary_manager
-from telemetry.testing import options_for_unittests
+from telemetry.testing import browser_test_context
 from telemetry.testing import serially_executed_browser_test_case
 
+import typ
+from typ import arg_parser
+
 DEFAULT_LOG_FORMAT = (
     '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
     '%(message)s')
 
 
-def ProcessCommandLineOptions(test_class, project_config, args):
+TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']
+
+
+def ProcessCommandLineOptions(test_class, default_chrome_root, args):
   options = browser_options.BrowserFinderOptions()
   options.browser_type = 'any'
   parser = options.CreateParser(test_class.__doc__)
   test_class.AddCommandlineArgs(parser)
   # Set the default chrome root variable. This is required for the
   # Android browser finder to function properly.
-  parser.set_defaults(chrome_root=project_config.default_chrome_root)
+  if default_chrome_root:
+    parser.set_defaults(chrome_root=default_chrome_root)
   finder_options, positional_args = parser.parse_args(args)
   finder_options.positional_args = positional_args
-  options_for_unittests.Push(finder_options)
-  # Use this to signal serially_executed_browser_test_case.LoadAllTestsInModule
-  # not to load tests in cases it's not invoked by browser_test_runner
-  # framework.
-  finder_options.browser_test_runner_running = True
   return finder_options
 
 
 def _ValidateDistinctNames(browser_test_classes):
   names_to_test_classes = {}
   for cl in browser_test_classes:
     name = cl.Name()
     if name in names_to_test_classes:
       raise Exception('Test name %s is duplicated between %s and %s' % (
           name, repr(cl), repr(names_to_test_classes[name])))
(...skipping 89 matching lines...)
     shards[min_shard_index]['tests'].append(t)
     shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
 
   res = [s['tests'] for s in shards]
   if debug_shard_distributions:
     _DebugShardDistributions(res, test_times)
 
   return res
 
 
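The chunk above is the tail of a greedy, time-weighted sharding routine: each test is appended to whichever shard currently has the smallest accumulated runtime, with _TestTime falling back to a median when a test has no recorded time. A minimal standalone sketch of the idea (names are illustrative, not the verbatim telemetry implementation):

    # Greedily balance tests across shards by accumulated runtime.
    def split_shards_by_time(test_cases, total_shards, test_times, median):
      shards = [{'tests': [], 'total_time': 0.0} for _ in range(total_shards)]
      for t in test_cases:
        # Pick the shard that is currently the lightest.
        min_index = min(range(total_shards),
                        key=lambda i: shards[i]['total_time'])
        shards[min_index]['tests'].append(t)
        # Tests with no recorded time count as the median runtime.
        shards[min_index]['total_time'] += test_times.get(t, median)
      return [s['tests'] for s in shards]
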
-def _LoadTests(test_class, finder_options, filter_regex_str,
-               filter_tests_after_sharding,
-               total_shards, shard_index, test_times,
-               debug_shard_distributions):
+def LoadTestCasesToBeRun(
+    test_class, finder_options, filter_regex_str, filter_tests_after_sharding,
+    total_shards, shard_index, test_times, debug_shard_distributions):
   test_cases = []
   real_regex = re.compile(filter_regex_str)
   noop_regex = re.compile('')
   if filter_tests_after_sharding:
     filter_regex = noop_regex
     post_filter_regex = real_regex
   else:
     filter_regex = real_regex
     post_filter_regex = noop_regex
 
(...skipping 16 matching lines...)
     for i in xrange(total_shards):
       tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
       tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
     # Can edit the code to get 'test_times' passed in here for
     # debugging and comparison purposes.
     _DebugShardDistributions(tmp_shards, None)
   return [t for t in test_cases[test_range[0]:test_range[1]]
           if post_filter_regex.search(t.shortName())]
 
 
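LoadTestCasesToBeRun slices the discovered tests into contiguous ranges, one per shard; _TestRangeForShard itself falls in the elided lines above. A plausible sketch consistent with how it is called (an assumption, not the verbatim implementation):

    # Split num_tests into near-equal contiguous [start, end) ranges.
    def test_range_for_shard(total_shards, shard_index, num_tests):
      base, remainder = divmod(num_tests, total_shards)
      # The first `remainder` shards each take one extra test.
      start = shard_index * base + min(shard_index, remainder)
      end = start + base + (1 if shard_index < remainder else 0)
      return (start, end)

For example, 10 tests over 3 shards yields the ranges (0, 4), (4, 7) and (7, 10).
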
-class TestRunOptions(object):
-  def __init__(self):
-    self.verbosity = 2
-
-
-class BrowserTestResult(unittest.TextTestResult):
-  def __init__(self, *args, **kwargs):
-    super(BrowserTestResult, self).__init__(*args, **kwargs)
-    self.successes = []
-    self.times = {}
-    self._current_test_start_time = 0
-
-  def addSuccess(self, test):
-    super(BrowserTestResult, self).addSuccess(test)
-    self.successes.append(test)
-
-  def startTest(self, test):
-    super(BrowserTestResult, self).startTest(test)
-    self._current_test_start_time = time.time()
-
-  def stopTest(self, test):
-    super(BrowserTestResult, self).stopTest(test)
-    self.times[test.shortName()] = (time.time() - self._current_test_start_time)
-
-
-def Run(project_config, test_run_options, args, **log_config_kwargs):
-  # the log level is set in browser_options
-  log_config_kwargs.pop('level', None)
-  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
-  logging.basicConfig(**log_config_kwargs)
-
-  binary_manager.InitDependencyManager(project_config.client_configs)
-  parser = argparse.ArgumentParser(description='Run a browser test suite')
+def _CreateTestArgParsers():
+  parser = typ.ArgumentParser(discovery=False, reporting=True, running=True)
   parser.add_argument('test', type=str, help='Name of the test suite to run')
-  parser.add_argument(
-      '--write-abbreviated-json-results-to', metavar='FILENAME', action='store',
-      help=('If specified, writes the full results to that path in json form.'))
   parser.add_argument('--test-filter', type=str, default='', action='store',
       help='Run only tests whose names match the given filter regexp.')
-  parser.add_argument('--total-shards', default=1, type=int,
-      help='Total number of shards being used for this test run. (The user of '
-      'this script is responsible for spawning all of the shards.)')
-  parser.add_argument('--shard-index', default=0, type=int,
-      help='Shard index (0..total_shards-1) of this test run.')
   parser.add_argument(
       '--filter-tests-after-sharding', default=False, action='store_true',
       help=('Apply the test filter after tests are split for sharding. Useful '
             'for reproducing bugs related to the order in which tests run.'))
   parser.add_argument(
       '--read-abbreviated-json-results-from', metavar='FILENAME',
       action='store', help=(
           'If specified, reads abbreviated results from that path in json form. '
-          'The file format is that written by '
-          '--write-abbreviated-json-results-to. This information is used to more '
-          'evenly distribute tests among shards.'))
+          'This information is used to more evenly distribute tests among '
+          'shards.'))
   parser.add_argument('--debug-shard-distributions',
       action='store_true', default=False,
       help='Print debugging information about the shards\' test distributions')
 
-  option, extra_args = parser.parse_known_args(args)
+  parser.add_argument('--default-chrome-root', type=str, default=None)
+  parser.add_argument('--client-config', dest='client_configs',
+                      action='append', default=[])
+  parser.add_argument('--start-dir', dest='start_dirs',
+                      action='append', default=[])
+  parser.add_argument('--skip', metavar='glob', default=[],
+                      action='append',
+                      help=('Globs of test names to skip (defaults to %(default)s).'))
+  return parser
 
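As a usage illustration: because the parser is built with reporting and running enabled, typ's own flags (--jobs, --retry-limit, --write-full-results-to, and so on) are accepted alongside the browser-test-specific flags defined above, and are consumed by the RunTests entry point defined further down. A hypothetical invocation (the suite name and flag values are made up):

    import sys
    from telemetry.testing import run_browser_tests

    ret = run_browser_tests.RunTests([
        'my_browser_tests',                      # positional: suite to run
        '--test-filter=.*TabSwitching.*',        # regexp over test names
        '--skip=*flaky*',                        # glob of test ids to skip
        '--jobs=1',                              # typ flag: run serially
        '--write-full-results-to=results.json',  # typ reporting flag
    ])
    sys.exit(ret)
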
-  for start_dir in project_config.start_dirs:
+
+def _SkipMatch(name, skipGlobs):
+  return any(fnmatch.fnmatch(name, glob) for glob in skipGlobs)
+
+
+def _GetClassifier(args):
+  def _SeriallyExecutedBrowserTestCaseClassifer(test_set, test):
+    # Do not pick up tests that do not inherit from
+    # serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase
+    # class.
+    if not isinstance(test,
+        serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
+      return
+    name = test.id()
+    if _SkipMatch(name, args.skip):
+      test_set.tests_to_skip.append(
+          typ.TestInput(name, 'skipped because matched --skip'))
+      return
+    # For now, only support running these tests serially.
+    test_set.isolated_tests.append(typ.TestInput(name))
+  return _SeriallyExecutedBrowserTestCaseClassifer
+
+
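The --skip matching above uses fnmatch-style globs against full test ids (dotted names of the form 'module.TestClass.test_method'). For instance:

    import fnmatch

    names = ['foo.FooTest.test_basic', 'foo.FooTest.test_slow_case']
    skip_globs = ['*slow*']
    kept = [n for n in names
            if not any(fnmatch.fnmatch(n, g) for g in skip_globs)]
    # kept == ['foo.FooTest.test_basic']
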
+def RunTests(args):
+  parser = _CreateTestArgParsers()
+  try:
+    options, extra_args = parser.parse_known_args(args)
+  except arg_parser._Bailout:
+    return parser.exit_status
+  binary_manager.InitDependencyManager(options.client_configs)
+
+  for start_dir in options.start_dirs:
     modules_to_classes = discover.DiscoverClasses(
-        start_dir, project_config.top_level_dir,
+        start_dir, options.top_level_dir,
         base_class=serially_executed_browser_test_case.
         SeriallyExecutedBrowserTestCase)
     browser_test_classes = modules_to_classes.values()
 
   _ValidateDistinctNames(browser_test_classes)
 
   test_class = None
   for cl in browser_test_classes:
-    if cl.Name() == option.test:
+    if cl.Name() == options.test:
       test_class = cl
       break
 
   if not test_class:
-    print 'Cannot find test class with name matching %s' % option.test
+    print 'Cannot find test class with name matching %s' % options.test
     print 'Available tests: %s' % '\n'.join(
         cl.Name() for cl in browser_test_classes)
     return 1
 
-  options = ProcessCommandLineOptions(test_class, project_config, extra_args)
-
+  # Create test context.
+  context = browser_test_context.TypTestContext()
+  for c in options.client_configs:
+    context.client_configs.append(c)
+  context.finder_options = ProcessCommandLineOptions(
+      test_class, options.default_chrome_root, extra_args)
+  context.test_class = test_class
   test_times = None
-  if option.read_abbreviated_json_results_from:
-    with open(option.read_abbreviated_json_results_from, 'r') as f:
+  if options.read_abbreviated_json_results_from:
+    with open(options.read_abbreviated_json_results_from, 'r') as f:
       abbr_results = json.load(f)
       test_times = abbr_results.get('times')
+  tests_to_run = LoadTestCasesToBeRun(
+      test_class=test_class, finder_options=context.finder_options,
+      filter_regex_str=options.test_filter,
+      filter_tests_after_sharding=options.filter_tests_after_sharding,
+      total_shards=options.total_shards, shard_index=options.shard_index,
+      test_times=test_times,
+      debug_shard_distributions=options.debug_shard_distributions)
+  for t in tests_to_run:
+    context.test_case_ids_to_run.add(t.id())
+  context.Freeze()
+  browser_test_context._global_test_context = context
 
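The frozen context then travels to each typ worker, and the id set built above acts as an allow-list, which is why typ's own sharding is disabled further down. A sketch of the gating idea (a hypothetical helper, not the actual browser_test_context internals):

    # Hypothetical check: typ discovers every test, but only ids recorded in
    # the frozen context for this shard are actually executed.
    def should_run(test_id, context):
      return test_id in context.test_case_ids_to_run
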
-  suite = unittest.TestSuite()
-  for test in _LoadTests(test_class, options, option.test_filter,
-                         option.filter_tests_after_sharding,
-                         option.total_shards, option.shard_index,
-                         test_times, option.debug_shard_distributions):
-    suite.addTest(test)
+  # Set up the typ runner.
+  runner = typ.Runner()
 
-  results = unittest.TextTestRunner(
-      verbosity=test_run_options.verbosity,
-      resultclass=BrowserTestResult).run(suite)
-  if option.write_abbreviated_json_results_to:
-    with open(option.write_abbreviated_json_results_to, 'w') as f:
-      json_results = {'failures': [], 'successes': [],
-                      'times': {}, 'valid': True}
-      # Treat failures and errors identically in the JSON
-      # output. Failures are those which cooperatively fail using
-      # Python's unittest APIs; errors are those which abort the test
-      # case early with an exception.
-      failures = []
-      for fail, _ in results.failures + results.errors:
-        # When an error is thrown in an individual test method, setUp, or
-        # tearDown, fail is an instance of unittest.TestCase.
-        if isinstance(fail, unittest.TestCase):
-          failures.append(fail.shortName())
-        else:
-          # When an error is thrown in setUpClass or tearDownClass, an
-          # instance of _ErrorHolder is placed in the results.errors list. We
-          # use id() as the failure name in this case since shortName() is
-          # not available.
-          failures.append(fail.id())
-      failures = sorted(list(failures))
-      for failure_id in failures:
-        json_results['failures'].append(failure_id)
-      for passed_test_case in results.successes:
-        json_results['successes'].append(passed_test_case.shortName())
-      json_results['times'].update(results.times)
-      json.dump(json_results, f)
-  return len(results.failures + results.errors)
+  runner.context = context
+  runner.setup_fn = _SetUpProcess
+  runner.teardown_fn = _TearDownProcess
+
+  runner.args.jobs = options.jobs
+  runner.args.metadata = options.metadata
+  runner.args.passthrough = options.passthrough
+  runner.args.path = options.path
+  runner.args.retry_limit = options.retry_limit
+  runner.args.test_results_server = options.test_results_server
+  runner.args.test_type = options.test_type
+  runner.args.top_level_dir = options.top_level_dir
+  runner.args.write_full_results_to = options.write_full_results_to
+  runner.args.write_trace_to = options.write_trace_to
+  runner.args.list_only = options.list_only
+  runner.classifier = _GetClassifier(options)
+
+  runner.args.suffixes = TEST_SUFFIXES
+
+  # The browser_test_runner harness handles sharding itself, passing
+  # browser_test_context.test_case_ids_to_run to each subprocess to indicate
+  # which test cases to run, so typ's own sharding logic is explicitly
+  # disabled here.
+  runner.args.total_shards = 1
+  runner.args.shard_index = 0
+
+  runner.args.timing = True
+  runner.args.verbose = options.verbose
+  runner.win_multiprocessing = typ.WinMultiprocessing.importable
+  try:
+    ret, _, _ = runner.run()
+  except KeyboardInterrupt:
+    print >> sys.stderr, "interrupted, exiting"
+    ret = 130
+  return ret
+
+
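On interruption the runner returns 130, following the shell convention that a process killed by a signal exits with 128 plus the signal number:

    import signal

    # Ctrl-C raises KeyboardInterrupt via SIGINT (signal 2).
    SIGINT_EXIT_CODE = 128 + signal.SIGINT  # == 130 on POSIX systems
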
+def _SetUpProcess(child, context):
+  args = context.finder_options
+  if binary_manager.NeedsInit():
+    # On Windows, typ doesn't keep the DependencyManager initialization in
+    # the child processes.
+    binary_manager.InitDependencyManager(context.client_configs)
+  if args.remote_platform_options.device == 'android':
+    android_devices = android_device.FindAllAvailableDevices(args)
+    if not android_devices:
+      raise RuntimeError("No Android device found")
+    android_devices.sort(key=lambda device: device.name)
+    args.remote_platform_options.device = (
+        android_devices[child.worker_num - 1].guid)
+  browser_test_context._global_test_context = context
+  context.test_class.SetUpProcess()
+
+
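Each typ worker process claims its own Android device: the available devices are sorted by name for a stable order, then indexed by the worker's 1-based worker_num. This implicitly assumes the job count does not exceed the device count; otherwise the index would be out of range. A minimal sketch of the mapping (FakeDevice is a stand-in for telemetry's device objects):

    class FakeDevice(object):
      def __init__(self, name, guid):
        self.name = name
        self.guid = guid

    devices = [FakeDevice('device-b', 'guid-b'),
               FakeDevice('device-a', 'guid-a')]
    devices.sort(key=lambda device: device.name)

    def device_for_worker(worker_num):
      # worker_num is 1-based, so worker 1 gets the first sorted device.
      return devices[worker_num - 1].guid

    # device_for_worker(1) == 'guid-a'
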
+def _TearDownProcess(child, context):
+  del child, context  # Unused.
+  browser_test_context._global_test_context.test_class.TearDownProcess()
+  browser_test_context._global_test_context = None
+
+
+if __name__ == '__main__':
+  ret_code = RunTests(sys.argv[1:])
+  sys.exit(ret_code)