Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(599)

Side by Side Diff: telemetry/telemetry/testing/run_browser_tests.py

Issue 2590623002: [Telemetry] Migrate browser_test_runner to use typ as the test runner (Closed)
Patch Set: Add client_configs to the context Created 3 years, 12 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import argparse 5 import re
6 import sys
6 import json 7 import json
7 import logging
8 import re
9 import time
10 import unittest
11 8
12 from telemetry.core import discover 9 from telemetry.core import discover
13 from telemetry.internal.browser import browser_options 10 from telemetry.internal.browser import browser_options
11 from telemetry.internal.platform import android_device
14 from telemetry.internal.util import binary_manager 12 from telemetry.internal.util import binary_manager
15 from telemetry.testing import options_for_unittests 13 from telemetry.testing import browser_test_context
16 from telemetry.testing import serially_executed_browser_test_case 14 from telemetry.testing import serially_executed_browser_test_case
17 15
16 import typ
17 from typ import arg_parser
18
18 DEFAULT_LOG_FORMAT = ( 19 DEFAULT_LOG_FORMAT = (
19 '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d ' 20 '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
20 '%(message)s') 21 '%(message)s')
21 22
22 23
23 def ProcessCommandLineOptions(test_class, project_config, args): 24 TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']
nednguyen 2017/01/09 22:25:00 Ken, Dirk: I can exclude _unittest to fix the bug
25
26
27 def ProcessCommandLineOptions(test_class, default_chrome_root, args):
24 options = browser_options.BrowserFinderOptions() 28 options = browser_options.BrowserFinderOptions()
25 options.browser_type = 'any' 29 options.browser_type = 'any'
26 parser = options.CreateParser(test_class.__doc__) 30 parser = options.CreateParser(test_class.__doc__)
27 test_class.AddCommandlineArgs(parser) 31 test_class.AddCommandlineArgs(parser)
28 # Set the default chrome root variable. This is required for the 32 # Set the default chrome root variable. This is required for the
29 # Android browser finder to function properly. 33 # Android browser finder to function properly.
30 parser.set_defaults(chrome_root=project_config.default_chrome_root) 34 if default_chrome_root:
35 parser.set_defaults(chrome_root=default_chrome_root)
31 finder_options, positional_args = parser.parse_args(args) 36 finder_options, positional_args = parser.parse_args(args)
32 finder_options.positional_args = positional_args 37 finder_options.positional_args = positional_args
33 options_for_unittests.Push(finder_options)
34 return finder_options 38 return finder_options
35 39
36 40
37 def _ValidateDistinctNames(browser_test_classes): 41 def _ValidateDistinctNames(browser_test_classes):
38 names_to_test_classes = {} 42 names_to_test_classes = {}
39 for cl in browser_test_classes: 43 for cl in browser_test_classes:
40 name = cl.Name() 44 name = cl.Name()
41 if name in names_to_test_classes: 45 if name in names_to_test_classes:
42 raise Exception('Test name %s is duplicated between %s and %s' % ( 46 raise Exception('Test name %s is duplicated between %s and %s' % (
43 name, repr(cl), repr(names_to_test_classes[name]))) 47 name, repr(cl), repr(names_to_test_classes[name])))
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
133 shards[min_shard_index]['tests'].append(t) 137 shards[min_shard_index]['tests'].append(t)
134 shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median) 138 shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
135 139
136 res = [s['tests'] for s in shards] 140 res = [s['tests'] for s in shards]
137 if debug_shard_distributions: 141 if debug_shard_distributions:
138 _DebugShardDistributions(res, test_times) 142 _DebugShardDistributions(res, test_times)
139 143
140 return res 144 return res
141 145
142 146
143 def _LoadTests(test_class, finder_options, filter_regex_str, 147 def LoadTestCasesToBeRun(
144 filter_tests_after_sharding, 148 test_class, finder_options, filter_regex_str, filter_tests_after_sharding,
145 total_shards, shard_index, test_times, 149 total_shards, shard_index, test_times, debug_shard_distributions):
146 debug_shard_distributions):
147 test_cases = [] 150 test_cases = []
148 real_regex = re.compile(filter_regex_str) 151 real_regex = re.compile(filter_regex_str)
149 noop_regex = re.compile('') 152 noop_regex = re.compile('')
150 if filter_tests_after_sharding: 153 if filter_tests_after_sharding:
151 filter_regex = noop_regex 154 filter_regex = noop_regex
152 post_filter_regex = real_regex 155 post_filter_regex = real_regex
153 else: 156 else:
154 filter_regex = real_regex 157 filter_regex = real_regex
155 post_filter_regex = noop_regex 158 post_filter_regex = noop_regex
156 159
(...skipping 16 matching lines...) Expand all
173 for i in xrange(total_shards): 176 for i in xrange(total_shards):
174 tmp_range = _TestRangeForShard(total_shards, i, len(test_cases)) 177 tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
175 tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]]) 178 tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
176 # Can edit the code to get 'test_times' passed in here for 179 # Can edit the code to get 'test_times' passed in here for
177 # debugging and comparison purposes. 180 # debugging and comparison purposes.
178 _DebugShardDistributions(tmp_shards, None) 181 _DebugShardDistributions(tmp_shards, None)
179 return [t for t in test_cases[test_range[0]:test_range[1]] 182 return [t for t in test_cases[test_range[0]:test_range[1]]
180 if post_filter_regex.search(t.shortName())] 183 if post_filter_regex.search(t.shortName())]
181 184
182 185
183 class TestRunOptions(object): 186 def _CreateTestArgParsers():
184 def __init__(self): 187 parser = typ.ArgumentParser(discovery=False, reporting=True, running=True)
185 self.verbosity = 2
186
187
188 class BrowserTestResult(unittest.TextTestResult):
189 def __init__(self, *args, **kwargs):
190 super(BrowserTestResult, self).__init__(*args, **kwargs)
191 self.successes = []
192 self.times = {}
193 self._current_test_start_time = 0
194
195 def addSuccess(self, test):
196 super(BrowserTestResult, self).addSuccess(test)
197 self.successes.append(test)
198
199 def startTest(self, test):
200 super(BrowserTestResult, self).startTest(test)
201 self._current_test_start_time = time.time()
202
203 def stopTest(self, test):
204 super(BrowserTestResult, self).stopTest(test)
205 self.times[test.shortName()] = (time.time() - self._current_test_start_time)
206
207
208 def Run(project_config, test_run_options, args, **log_config_kwargs):
209 # the log level is set in browser_options
210 log_config_kwargs.pop('level', None)
211 log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
212 logging.basicConfig(**log_config_kwargs)
213
214 binary_manager.InitDependencyManager(project_config.client_configs)
215 parser = argparse.ArgumentParser(description='Run a browser test suite')
216 parser.add_argument('test', type=str, help='Name of the test suite to run') 188 parser.add_argument('test', type=str, help='Name of the test suite to run')
217 parser.add_argument(
218 '--write-abbreviated-json-results-to', metavar='FILENAME', action='store',
219 help=('If specified, writes the full results to that path in json form.'))
220 parser.add_argument('--test-filter', type=str, default='', action='store', 189 parser.add_argument('--test-filter', type=str, default='', action='store',
221 help='Run only tests whose names match the given filter regexp.') 190 help='Run only tests whose names match the given filter regexp.')
222 parser.add_argument('--total-shards', default=1, type=int,
223 help='Total number of shards being used for this test run. (The user of '
224 'this script is responsible for spawning all of the shards.)')
225 parser.add_argument('--shard-index', default=0, type=int,
226 help='Shard index (0..total_shards-1) of this test run.')
227 parser.add_argument( 191 parser.add_argument(
228 '--filter-tests-after-sharding', default=False, action='store_true', 192 '--filter-tests-after-sharding', default=False, action='store_true',
229 help=('Apply the test filter after tests are split for sharding. Useful ' 193 help=('Apply the test filter after tests are split for sharding. Useful '
230 'for reproducing bugs related to the order in which tests run.')) 194 'for reproducing bugs related to the order in which tests run.'))
231 parser.add_argument( 195 parser.add_argument(
232 '--read-abbreviated-json-results-from', metavar='FILENAME', 196 '--read-abbreviated-json-results-from', metavar='FILENAME',
233 action='store', help=( 197 action='store', help=(
234 'If specified, reads abbreviated results from that path in json form. ' 198 'If specified, reads abbreviated results from that path in json form. '
235 'The file format is that written by ' 199 'This information is used to more evenly distribute tests among '
236 '--write-abbreviated-json-results-to. This information is used to more ' 200 'shards.'))
237 'evenly distribute tests among shards.'))
238 parser.add_argument('--debug-shard-distributions', 201 parser.add_argument('--debug-shard-distributions',
239 action='store_true', default=False, 202 action='store_true', default=False,
240 help='Print debugging information about the shards\' test distributions') 203 help='Print debugging information about the shards\' test distributions')
241 204
242 option, extra_args = parser.parse_known_args(args) 205 parser.add_argument('--default-chrome-root', type=str, default=None)
206 parser.add_argument('--client-config', dest='client_configs',
207 action='append', default=[])
208 parser.add_argument('--start-dir', dest='start_dirs',
209 action='append', default=[])
210 return parser
243 211
244 for start_dir in project_config.start_dirs: 212
213 def RunTests(args):
214 parser = _CreateTestArgParsers()
215 try:
216 options, extra_args = parser.parse_known_args(args)
217 except arg_parser._Bailout:
218 return parser.exit_status
219 binary_manager.InitDependencyManager(options.client_configs)
220
221 for start_dir in options.start_dirs:
245 modules_to_classes = discover.DiscoverClasses( 222 modules_to_classes = discover.DiscoverClasses(
246 start_dir, project_config.top_level_dir, 223 start_dir, options.top_level_dir,
247 base_class=serially_executed_browser_test_case. 224 base_class=serially_executed_browser_test_case.
248 SeriallyExecutedBrowserTestCase) 225 SeriallyExecutedBrowserTestCase)
249 browser_test_classes = modules_to_classes.values() 226 browser_test_classes = modules_to_classes.values()
250 227
251 _ValidateDistinctNames(browser_test_classes) 228 _ValidateDistinctNames(browser_test_classes)
252 229
253 test_class = None 230 test_class = None
254 for cl in browser_test_classes: 231 for cl in browser_test_classes:
255 if cl.Name() == option.test: 232 if cl.Name() == options.test:
256 test_class = cl 233 test_class = cl
257 break 234 break
258 235
259 if not test_class: 236 if not test_class:
260 print 'Cannot find test class with name matching %s' % option.test 237 print 'Cannot find test class with name matching %s' % options.test
261 print 'Available tests: %s' % '\n'.join( 238 print 'Available tests: %s' % '\n'.join(
262 cl.Name() for cl in browser_test_classes) 239 cl.Name() for cl in browser_test_classes)
263 return 1 240 return 1
264 241
265 options = ProcessCommandLineOptions(test_class, project_config, extra_args) 242 # Create test context.
266 243 context = browser_test_context.TypTestContext()
244 for c in options.client_configs:
245 context.client_configs.append(c)
246 context.finder_options = ProcessCommandLineOptions(
247 test_class, options.default_chrome_root, extra_args)
248 context.test_class_name = test_class.Name()
267 test_times = None 249 test_times = None
268 if option.read_abbreviated_json_results_from: 250 if options.read_abbreviated_json_results_from:
269 with open(option.read_abbreviated_json_results_from, 'r') as f: 251 with open(options.read_abbreviated_json_results_from, 'r') as f:
270 abbr_results = json.load(f) 252 abbr_results = json.load(f)
271 test_times = abbr_results.get('times') 253 test_times = abbr_results.get('times')
254 tests_to_run = LoadTestCasesToBeRun(
255 test_class=test_class, finder_options=context.finder_options,
256 filter_regex_str=options.test_filter,
257 filter_tests_after_sharding=options.filter_tests_after_sharding,
258 total_shards=options.total_shards, shard_index=options.shard_index,
259 test_times=test_times,
260 debug_shard_distributions=options.debug_shard_distributions)
261 for t in tests_to_run:
262 context.test_case_ids_to_run.add(t.id())
263 context.Freeze()
264 browser_test_context._global_test_context = context
272 265
273 suite = unittest.TestSuite() 266 # Setup typ runner.
274 for test in _LoadTests(test_class, options, option.test_filter, 267 runner = typ.Runner()
275 option.filter_tests_after_sharding,
276 option.total_shards, option.shard_index,
277 test_times, option.debug_shard_distributions):
278 suite.addTest(test)
279 268
280 results = unittest.TextTestRunner( 269 runner.context = context
281 verbosity=test_run_options.verbosity, 270 runner.setup_fn = _SetUpProcess
282 resultclass=BrowserTestResult).run(suite) 271 runner.teardown_fn = _TearDownProcess
283 if option.write_abbreviated_json_results_to: 272
284 with open(option.write_abbreviated_json_results_to, 'w') as f: 273 runner.args.jobs = options.jobs
285 json_results = {'failures': [], 'successes': [], 274 runner.args.metadata = options.metadata
286 'times': {}, 'valid': True} 275 runner.args.passthrough = options.passthrough
287 # Treat failures and errors identically in the JSON 276 runner.args.path = options.path
288 # output. Failures are those which cooperatively fail using 277 runner.args.retry_limit = options.retry_limit
289 # Python's unittest APIs; errors are those which abort the test 278 runner.args.test_results_server = options.test_results_server
 290 # case early with an exception. 279 runner.args.test_type = options.test_type
291 failures = [] 280 runner.args.top_level_dir = options.top_level_dir
292 for fail, _ in results.failures + results.errors: 281 runner.args.write_full_results_to = options.write_full_results_to
293 # When errors in thrown in individual test method or setUp or tearDown, 282 runner.args.write_trace_to = options.write_trace_to
294 # fail would be an instance of unittest.TestCase. 283 runner.args.list_only = options.list_only
295 if isinstance(fail, unittest.TestCase): 284
296 failures.append(fail.shortName()) 285 runner.args.suffixes = TEST_SUFFIXES
297 else: 286
298 # When errors in thrown in setupClass or tearDownClass, an instance of 287 # Since sharding logic is handled by browser_test_runner harness by passing
 299 # _ErrorHolder is placed in results.errors list. We use the id() 289 # cases to be run, we explicitly disable sharding logic in typ.
300 # as failure name in this case since shortName() is not available. 289 # cases to be run, we explicitly disable sharding logic in typ.
301 failures.append(fail.id()) 290 runner.args.total_shards = 1
302 failures = sorted(list(failures)) 291 runner.args.shard_index = 0
303 for failure_id in failures: 292
304 json_results['failures'].append(failure_id) 293 runner.args.timing = True
305 for passed_test_case in results.successes: 294 runner.args.verbose = options.verbose
306 json_results['successes'].append(passed_test_case.shortName()) 295 runner.win_multiprocessing = typ.WinMultiprocessing.importable
307 json_results['times'].update(results.times) 296 try:
308 json.dump(json_results, f) 297 ret, _, _ = runner.run()
309 return len(results.failures + results.errors) 298 except KeyboardInterrupt:
299 print >> sys.stderr, "interrupted, exiting"
300 ret = 130
301 return ret
302
303
def _SetUpProcess(child, context):
  """typ setup_fn: runs once in each worker process before tests execute.

  Re-initializes the binary DependencyManager when needed and, for Android
  runs, assigns a distinct device to this worker, then publishes the frozen
  test context globally for the serially-executed test cases to read.

  Args:
    child: typ child-process handle; its worker_num selects this worker's
        Android device.
    context: the frozen TypTestContext created in RunTests.
  """
  # BUG FIX: the original did `del child  # Unused.` here, but `child` is
  # read below (child.worker_num) on the Android path, which would raise
  # an UnboundLocalError. Keep the parameter alive instead.
  args = context.finder_options
  if binary_manager.NeedsInit():
    # On windows, typ doesn't keep the DependencyManager initialization in the
    # child processes.
    binary_manager.InitDependencyManager(context.client_configs)
  if args.remote_platform_options.device == 'android':
    android_devices = android_device.FindAllAvailableDevices(args)
    if not android_devices:
      raise RuntimeError("No Android device found")
    # Sort for a deterministic device ordering, then hand each worker its own
    # device. NOTE(review): assumes worker_num is 1-based and that there are
    # at least as many devices as workers — confirm against typ's contract.
    android_devices.sort(key=lambda device: device.name)
    args.remote_platform_options.device = (
        android_devices[child.worker_num-1].guid)
  browser_test_context._global_test_context = context
319
320
def _TearDownProcess(child, context):
  """typ teardown_fn: clears the per-process global test context.

  Both arguments are required by typ's teardown_fn signature but are not
  needed for the cleanup itself.
  """
  del child
  del context
  browser_test_context._global_test_context = None
324
325
if __name__ == '__main__':
  # Script entry point: run the suite and propagate its exit status.
  sys.exit(RunTests(sys.argv[1:]))
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698