Chromium Code Reviews

Side by Side Diff: telemetry/telemetry/testing/browser_test_runner.py

Issue 2700563004: [Telemetry] Migrate browser_test_runner to use typ as the test runner (Closed)
Patch Set: Created 3 years, 10 months ago
Shared file header (lines 1-3, unchanged by the patch):

1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.

New file (lines 4-37, right side of the diff): browser_test_runner.py becomes a thin wrapper that re-invokes run_browser_tests.py in a subprocess, translating the ProjectConfig into command-line flags and putting telemetry on PYTHONPATH.

4 import os
5 import subprocess
6 import sys
7
8 from telemetry.core import util
9
10
11 def Run(project_config, args):
12   assert '--top-level-dir' not in args, (
13       'Top level directory for running tests should be specified through '
14       'the instance of telemetry.project_config.ProjectConfig.')
15   assert '--client-config' not in args, (
16       'Client config file to be used for telemetry should be specified through '
17       'the instance of telemetry.project_config.ProjectConfig.')
18   assert project_config.top_level_dir, 'Must specify top level dir for project'
19   args.extend(['--top-level-dir', project_config.top_level_dir])
20   for c in project_config.client_configs:
21     args.extend(['--client-config', c])
22   for s in project_config.start_dirs:
23     args.extend(['--start-dir', s])
24
25   if project_config.default_chrome_root and not '--chrome-root' in args:
26     args.extend(['--chrome-root', project_config.default_chrome_root])
27
28   env = os.environ.copy()
29   telemetry_dir = util.GetTelemetryDir()
30   if 'PYTHONPATH' in env:
31     env['PYTHONPATH'] = os.pathsep.join([env['PYTHONPATH'], telemetry_dir])
32   else:
33     env['PYTHONPATH'] = telemetry_dir
34
35   path_to_run_tests = os.path.join(os.path.abspath(os.path.dirname(__file__)),
36                                    'run_browser_tests.py')
37   return subprocess.call([sys.executable, path_to_run_tests] + args, env=env)
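For orientation, a minimal sketch (not part of the patch) of how a suite's entry point might call the new wrapper. The ProjectConfig keyword arguments are assumptions inferred from the attributes Run() reads (top_level_dir, client_configs, start_dirs, default_chrome_root), not a confirmed constructor signature, and the paths are placeholders.

import sys

from telemetry.project_config import ProjectConfig
from telemetry.testing import browser_test_runner


def main():
  # Constructor arguments are assumed for illustration; start_dirs and any
  # other attributes Run() consults are left at their defaults here.
  config = ProjectConfig(
      top_level_dir='/path/to/my/project',   # becomes --top-level-dir
      client_configs=[],                     # each entry becomes --client-config
      default_chrome_root=None)              # becomes --chrome-root if set
  # Everything on the command line is forwarded verbatim to
  # run_browser_tests.py; Run() returns the subprocess exit code.
  return browser_test_runner.Run(config, sys.argv[1:])


if __name__ == '__main__':
  sys.exit(main())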
Old file (left side of the diff, removed by this patch): the previous in-process unittest-based runner, which continues below to the end of the file.

4
5 import argparse
6 import json
7 import logging
8 import re
9 import time
10 import unittest
11
12 from telemetry.core import discover
13 from telemetry.internal.browser import browser_options
14 from telemetry.internal.util import binary_manager
15 from telemetry.testing import options_for_unittests
16 from telemetry.testing import serially_executed_browser_test_case
17
18 DEFAULT_LOG_FORMAT = (
19 '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
20 '%(message)s')
21
22
23 def ProcessCommandLineOptions(test_class, project_config, args):
24 options = browser_options.BrowserFinderOptions()
25 options.browser_type = 'any'
26 parser = options.CreateParser(test_class.__doc__)
27 test_class.AddCommandlineArgs(parser)
28 # Set the default chrome root variable. This is required for the
29 # Android browser finder to function properly.
30 parser.set_defaults(chrome_root=project_config.default_chrome_root)
31 finder_options, positional_args = parser.parse_args(args)
32 finder_options.positional_args = positional_args
33 options_for_unittests.Push(finder_options)
34 # Use this to signal serially_executed_browser_test_case.LoadAllTestsInModule
35 # not to load tests when it is not invoked by the browser_test_runner
36 # framework.
37 finder_options.browser_test_runner_running = True
38 return finder_options
39
40
41 def _ValidateDistinctNames(browser_test_classes):
42 names_to_test_classes = {}
43 for cl in browser_test_classes:
44 name = cl.Name()
45 if name in names_to_test_classes:
46 raise Exception('Test name %s is duplicated between %s and %s' % (
47 name, repr(cl), repr(names_to_test_classes[name])))
48 names_to_test_classes[name] = cl
49
50
51 def _TestRangeForShard(total_shards, shard_index, num_tests):
52 """Returns a 2-tuple containing the start (inclusive) and ending
53 (exclusive) indices of the tests that should be run, given that
54 |num_tests| tests are split across |total_shards| shards, and that
55 |shard_index| is currently being run.
56 """
57 assert num_tests >= 0
58 assert total_shards >= 1
59 assert shard_index >= 0 and shard_index < total_shards, (
60 'shard_index (%d) must be >= 0 and < total_shards (%d)' %
61 (shard_index, total_shards))
62 if num_tests == 0:
63 return (0, 0)
64 floored_tests_per_shard = num_tests // total_shards
65 remaining_tests = num_tests % total_shards
66 if remaining_tests == 0:
67 return (floored_tests_per_shard * shard_index,
68 floored_tests_per_shard * (1 + shard_index))
69 # More complicated. Some shards will run floored_tests_per_shard
70 # tests, and some will run 1 + floored_tests_per_shard.
71 num_earlier_shards_with_one_extra_test = min(remaining_tests, shard_index)
72 num_earlier_shards_with_no_extra_tests = max(
73 0, shard_index - num_earlier_shards_with_one_extra_test)
74 num_earlier_tests = (
75 num_earlier_shards_with_one_extra_test * (floored_tests_per_shard + 1) +
76 num_earlier_shards_with_no_extra_tests * floored_tests_per_shard)
77 tests_for_this_shard = floored_tests_per_shard
78 if shard_index < remaining_tests:
79 tests_for_this_shard += 1
80 return (num_earlier_tests, num_earlier_tests + tests_for_this_shard)
81
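A quick worked example of the range arithmetic above, not part of the patch (it assumes a context with access to _TestRangeForShard, such as a unit test for this module): with 10 tests and 3 shards, floored_tests_per_shard is 3 and remaining_tests is 1, so shard 0 absorbs the extra test and each later shard starts right after the tests of all earlier shards.

# 10 tests split across 3 shards: together the ranges cover each test exactly once.
assert _TestRangeForShard(total_shards=3, shard_index=0, num_tests=10) == (0, 4)
assert _TestRangeForShard(total_shards=3, shard_index=1, num_tests=10) == (4, 7)
assert _TestRangeForShard(total_shards=3, shard_index=2, num_tests=10) == (7, 10)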
82
83 def _MedianTestTime(test_times):
84 times = test_times.values()
85 times.sort()
86 if len(times) == 0:
87 return 0
88 halfLen = len(times) / 2
89 if len(times) % 2:
90 return times[halfLen]
91 else:
92 return 0.5 * (times[halfLen - 1] + times[halfLen])
93
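For reference, a tiny illustration (not part of the patch) of the median that later serves as the fallback duration for tests with no recorded time; note the helper relies on Python 2 semantics, where dict.values() returns a list and / on two ints is integer division.

# Odd number of recorded times: the middle value after sorting.
#   _MedianTestTime({'a': 1.0, 'b': 9.0, 'c': 3.0}) == 3.0
# Even number of recorded times: the mean of the two middle values.
#   _MedianTestTime({'a': 1.0, 'b': 3.0, 'c': 5.0, 'd': 7.0}) == 4.0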
94
95 def _TestTime(test, test_times, default_test_time):
96 return test_times.get(test.shortName()) or default_test_time
97
98
99 def _DebugShardDistributions(shards, test_times):
100 for i, s in enumerate(shards):
101 num_tests = len(s)
102 if test_times:
103 median = _MedianTestTime(test_times)
104 shard_time = 0.0
105 for t in s:
106 shard_time += _TestTime(t, test_times, median)
107 print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests)
108 else:
109 print 'shard %d: %d tests (unknown duration)' % (i, num_tests)
110
111
112 def _SplitShardsByTime(test_cases, total_shards, test_times,
113 debug_shard_distributions):
114 median = _MedianTestTime(test_times)
115 shards = []
116 for i in xrange(total_shards):
117 shards.append({'total_time': 0.0, 'tests': []})
118 test_cases.sort(key=lambda t: _TestTime(t, test_times, median),
119 reverse=True)
120
121 # The greedy algorithm has been empirically tested on the WebGL 2.0
122 # conformance tests' times, and results in an essentially perfect
123 # shard distribution of 530 seconds per shard. In the same scenario,
124 # round-robin scheduling resulted in shard times spread between 502
125 # and 592 seconds, and the current alphabetical sharding resulted in
126 # shard times spread between 44 and 1591 seconds.
127
128 # Greedy scheduling. O(m*n), where m is the number of shards and n
129 # is the number of test cases.
130 for t in test_cases:
131 min_shard_index = 0
132 min_shard_time = None
133 for i in xrange(total_shards):
134 if min_shard_time is None or shards[i]['total_time'] < min_shard_time:
135 min_shard_index = i
136 min_shard_time = shards[i]['total_time']
137 shards[min_shard_index]['tests'].append(t)
138 shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
139
140 res = [s['tests'] for s in shards]
141 if debug_shard_distributions:
142 _DebugShardDistributions(res, test_times)
143
144 return res
145
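To make the greedy scheduling concrete, a small self-contained sketch (not part of the patch, assuming access to _SplitShardsByTime, e.g. from a unit test for this module); FakeTest is a hypothetical stand-in for the generated test cases, which only need a shortName() method here.

import collections

# Minimal stand-in for a generated test case; only shortName() is consulted.
FakeTest = collections.namedtuple('FakeTest', ['name'])
FakeTest.shortName = lambda self: self.name

times = {'A': 10.0, 'B': 9.0, 'C': 4.0, 'D': 3.0, 'E': 2.0}
cases = [FakeTest(n) for n in times]
shards = _SplitShardsByTime(cases, 2, times, debug_shard_distributions=False)
# Longest-first assignment to the currently lightest shard: one shard ends up
# with A, D, E (15 seconds of estimated work), the other with B, C (13 seconds).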
146
147 def _LoadTests(test_class, finder_options, filter_regex_str,
148 filter_tests_after_sharding,
149 total_shards, shard_index, test_times,
150 debug_shard_distributions):
151 test_cases = []
152 real_regex = re.compile(filter_regex_str)
153 noop_regex = re.compile('')
154 if filter_tests_after_sharding:
155 filter_regex = noop_regex
156 post_filter_regex = real_regex
157 else:
158 filter_regex = real_regex
159 post_filter_regex = noop_regex
160
161 for t in serially_executed_browser_test_case.GenerateTestCases(
162 test_class, finder_options):
163 if filter_regex.search(t.shortName()):
164 test_cases.append(t)
165
166 if test_times:
167 # Assign tests to shards.
168 shards = _SplitShardsByTime(test_cases, total_shards, test_times,
169 debug_shard_distributions)
170 return [t for t in shards[shard_index]
171 if post_filter_regex.search(t.shortName())]
172 else:
173 test_cases.sort(key=lambda t: t.shortName())
174 test_range = _TestRangeForShard(total_shards, shard_index, len(test_cases))
175 if debug_shard_distributions:
176 tmp_shards = []
177 for i in xrange(total_shards):
178 tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
179 tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
180 # Can edit the code to get 'test_times' passed in here for
181 # debugging and comparison purposes.
182 _DebugShardDistributions(tmp_shards, None)
183 return [t for t in test_cases[test_range[0]:test_range[1]]
184 if post_filter_regex.search(t.shortName())]
185
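The two filter modes in _LoadTests differ only in when the regexp is applied, which matters when reproducing order-dependent failures. For example, with four tests a1, a2, b1, b2 split across two shards and --test-filter=b: filtering before sharding first narrows the list to b1 and b2 and then splits it, so shard 0 runs b1 and shard 1 runs b2; with --filter-tests-after-sharding the unfiltered split (a1, a2 | b1, b2) is kept and the filter is applied per shard, so shard 0 runs nothing and shard 1 runs b1 and b2, the same distribution those tests get in an unfiltered run.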
186
187 class TestRunOptions(object):
188 def __init__(self):
189 self.verbosity = 2
190
191
192 class BrowserTestResult(unittest.TextTestResult):
193 def __init__(self, *args, **kwargs):
194 super(BrowserTestResult, self).__init__(*args, **kwargs)
195 self.successes = []
196 self.times = {}
197 self._current_test_start_time = 0
198
199 def addSuccess(self, test):
200 super(BrowserTestResult, self).addSuccess(test)
201 self.successes.append(test)
202
203 def startTest(self, test):
204 super(BrowserTestResult, self).startTest(test)
205 self._current_test_start_time = time.time()
206
207 def stopTest(self, test):
208 super(BrowserTestResult, self).stopTest(test)
209 self.times[test.shortName()] = (time.time() - self._current_test_start_time)
210
211
212 def Run(project_config, test_run_options, args, **log_config_kwargs):
213 # the log level is set in browser_options
214 log_config_kwargs.pop('level', None)
215 log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
216 logging.basicConfig(**log_config_kwargs)
217
218 binary_manager.InitDependencyManager(project_config.client_configs)
219 parser = argparse.ArgumentParser(description='Run a browser test suite')
220 parser.add_argument('test', type=str, help='Name of the test suite to run')
221 parser.add_argument(
222 '--write-abbreviated-json-results-to', metavar='FILENAME', action='store',
223 help=('If specified, writes the abbreviated results to that path in json form.'))
224 parser.add_argument('--test-filter', type=str, default='', action='store',
225 help='Run only tests whose names match the given filter regexp.')
226 parser.add_argument('--total-shards', default=1, type=int,
227 help='Total number of shards being used for this test run. (The user of '
228 'this script is responsible for spawning all of the shards.)')
229 parser.add_argument('--shard-index', default=0, type=int,
230 help='Shard index (0..total_shards-1) of this test run.')
231 parser.add_argument(
232 '--filter-tests-after-sharding', default=False, action='store_true',
233 help=('Apply the test filter after tests are split for sharding. Useful '
234 'for reproducing bugs related to the order in which tests run.'))
235 parser.add_argument(
236 '--read-abbreviated-json-results-from', metavar='FILENAME',
237 action='store', help=(
238 'If specified, reads abbreviated results from that path in json form. '
239 'The file format is that written by '
240 '--write-abbreviated-json-results-to. This information is used to more '
241 'evenly distribute tests among shards.'))
242 parser.add_argument('--debug-shard-distributions',
243 action='store_true', default=False,
244 help='Print debugging information about the shards\' test distributions')
245
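# Illustration, not part of the patch: a typical sharded invocation through a
# project-specific wrapper (script name hypothetical) that ends up in Run():
#   run_my_browser_tests MySuite --total-shards=4 --shard-index=1 \
#       --read-abbreviated-json-results-from=prev_results.json \
#       --write-abbreviated-json-results-to=shard1_results.json
# Flags this parser does not recognize fall through to extra_args below and are
# handled by the browser/test-class parser in ProcessCommandLineOptions().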
246 option, extra_args = parser.parse_known_args(args)
247
248 for start_dir in project_config.start_dirs:
249 modules_to_classes = discover.DiscoverClasses(
250 start_dir, project_config.top_level_dir,
251 base_class=serially_executed_browser_test_case.
252 SeriallyExecutedBrowserTestCase)
253 browser_test_classes = modules_to_classes.values()
254
255 _ValidateDistinctNames(browser_test_classes)
256
257 test_class = None
258 for cl in browser_test_classes:
259 if cl.Name() == option.test:
260 test_class = cl
261 break
262
263 if not test_class:
264 print 'Cannot find test class with name matching %s' % option.test
265 print 'Available tests: %s' % '\n'.join(
266 cl.Name() for cl in browser_test_classes)
267 return 1
268
269 options = ProcessCommandLineOptions(test_class, project_config, extra_args)
270
271 test_times = None
272 if option.read_abbreviated_json_results_from:
273 with open(option.read_abbreviated_json_results_from, 'r') as f:
274 abbr_results = json.load(f)
275 test_times = abbr_results.get('times')
276
277 suite = unittest.TestSuite()
278 for test in _LoadTests(test_class, options, option.test_filter,
279 option.filter_tests_after_sharding,
280 option.total_shards, option.shard_index,
281 test_times, option.debug_shard_distributions):
282 suite.addTest(test)
283
284 results = unittest.TextTestRunner(
285 verbosity=test_run_options.verbosity,
286 resultclass=BrowserTestResult).run(suite)
287 if option.write_abbreviated_json_results_to:
288 with open(option.write_abbreviated_json_results_to, 'w') as f:
289 json_results = {'failures': [], 'successes': [],
290 'times': {}, 'valid': True}
291 # Treat failures and errors identically in the JSON
292 # output. Failures are those which cooperatively fail using
293 # Python's unittest APIs; errors are those which abort the test
294 # case early with an exception.
295 failures = []
296 for fail, _ in results.failures + results.errors:
297 # When errors are thrown in an individual test method, or in setUp or
298 # tearDown, fail is an instance of unittest.TestCase.
299 if isinstance(fail, unittest.TestCase):
300 failures.append(fail.shortName())
301 else:
302 # When errors are thrown in setUpClass or tearDownClass, an instance of
303 # _ErrorHolder is placed in the results.errors list. We use id() as the
304 # failure name in this case since shortName() is not available.
305 failures.append(fail.id())
306 failures = sorted(list(failures))
307 for failure_id in failures:
308 json_results['failures'].append(failure_id)
309 for passed_test_case in results.successes:
310 json_results['successes'].append(passed_test_case.shortName())
311 json_results['times'].update(results.times)
312 json.dump(json_results, f)
313 return len(results.failures + results.errors)
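For reference, a sketch of the abbreviated results written here and read back by --read-abbreviated-json-results-from (test names invented for illustration; only the 'times' map feeds back into sharding), shown as the equivalent Python literal:

example_abbreviated_results = {
    'failures': ['mySuite.MyBrowserTest.testThatCrashes'],
    'successes': ['mySuite.MyBrowserTest.testThatPasses'],
    'times': {
        'mySuite.MyBrowserTest.testThatCrashes': 2.5,
        'mySuite.MyBrowserTest.testThatPasses': 0.8,
    },
    'valid': True,
}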