Chromium Code Reviews

Unified Diff: telemetry/telemetry/testing/browser_test_runner.py

Issue 2162963002: [polymer] Merge of master into polymer10-migration (Closed) Base URL: git@github.com:catapult-project/catapult.git@polymer10-migration
Patch Set: Merge polymer10-migration into polymer10-merge. Created 4 years, 5 months ago.
Index: telemetry/telemetry/testing/browser_test_runner.py
diff --git a/telemetry/telemetry/testing/browser_test_runner.py b/telemetry/telemetry/testing/browser_test_runner.py
index 5a4dd4588f78f83ec6e2ecf7438349a52b3121a7..cea4e0e0c005b299e6523cbdb071030bb4bb6a7c 100644
--- a/telemetry/telemetry/testing/browser_test_runner.py
+++ b/telemetry/telemetry/testing/browser_test_runner.py
@@ -6,6 +6,7 @@ import argparse
import inspect
import json
import re
+import time
import unittest
from telemetry.core import discover
@@ -15,18 +16,21 @@ from telemetry.testing import options_for_unittests
from telemetry.testing import serially_executed_browser_test_case
-def ProcessCommandLineOptions(test_class, args):
+def ProcessCommandLineOptions(test_class, project_config, args):
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser(test_class.__doc__)
test_class.AddCommandlineArgs(parser)
+ # Set the default chrome root variable. This is required for the
+ # Android browser finder to function properly.
+ parser.set_defaults(chrome_root=project_config.default_chrome_root)
finder_options, positional_args = parser.parse_args(args)
finder_options.positional_args = positional_args
options_for_unittests.Push(finder_options)
return finder_options
-def ValidateDistinctNames(browser_test_classes):
+def _ValidateDistinctNames(browser_test_classes):
names_to_test_classes = {}
for cl in browser_test_classes:
name = cl.Name()
@@ -36,16 +40,16 @@ def ValidateDistinctNames(browser_test_classes):
names_to_test_classes[name] = cl
-def GenerateTestMethod(based_method, args):
+def _GenerateTestMethod(based_method, args):
return lambda self: based_method(self, *args)
_INVALID_TEST_NAME_RE = re.compile(r'[^a-zA-Z0-9_]')
-def ValidateTestMethodname(test_name):
+def _ValidateTestMethodname(test_name):
assert not bool(_INVALID_TEST_NAME_RE.search(test_name))
-def TestRangeForShard(total_shards, shard_index, num_tests):
+def _TestRangeForShard(total_shards, shard_index, num_tests):
"""Returns a 2-tuple containing the start (inclusive) and ending
(exclusive) indices of the tests that should be run, given that
|num_tests| tests are split across |total_shards| shards, and that
@@ -77,12 +81,85 @@ def TestRangeForShard(total_shards, shard_index, num_tests):
return (num_earlier_tests, num_earlier_tests + tests_for_this_shard)
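
(For reference: the docstring above describes the even-split arithmetic, but the
function body is elided by the diff context. A minimal sketch consistent with
the return statement shown, with remainder tests going to the earliest shards;
only the two names in the return line come from the hunk, the rest is
illustrative.)

    def _TestRangeForShardSketch(total_shards, shard_index, num_tests):
      tests_per_shard = num_tests // total_shards
      remainder = num_tests % total_shards
      if shard_index < remainder:
        # The first |remainder| shards each take one extra test.
        tests_for_this_shard = tests_per_shard + 1
        num_earlier_tests = shard_index * tests_for_this_shard
      else:
        tests_for_this_shard = tests_per_shard
        num_earlier_tests = (remainder * (tests_per_shard + 1) +
                             (shard_index - remainder) * tests_per_shard)
      return (num_earlier_tests, num_earlier_tests + tests_for_this_shard)

    # e.g. 10 tests across 3 shards -> (0, 4), (4, 7), (7, 10)
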
+def _MedianTestTime(test_times):
+ times = test_times.values()
+ times.sort()
+ if len(times) == 0:
+ return 0
+ halfLen = len(times) / 2
+ if len(times) % 2:
+ return times[halfLen]
+ else:
+ return 0.5 * (times[halfLen - 1] + times[halfLen])
+
+
+def _TestTime(test, test_times, default_test_time):
+ return test_times.get(test.shortName()) or default_test_time
+
+
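
(For reference, how the two helpers above behave on hypothetical timing data.
Note that _TestTime uses 'or', so a recorded time of 0 also falls back to the
default:)

    times = {'testA': 1.0, 'testB': 3.0, 'testC': 10.0, 'testD': 4.0}
    _MedianTestTime(times)  # == 3.5: even count, mean of the two middle values
    _MedianTestTime({})     # == 0: no recorded times yet
    _TestTime(test, times, 3.5)  # recorded time for test.shortName(), or the
                                 # median when the name is missing or time is 0
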
+def _DebugShardDistributions(shards, test_times):
+ for i, s in enumerate(shards):
+ num_tests = len(s)
+ if test_times:
+ median = _MedianTestTime(test_times)
+ shard_time = 0.0
+ for t in s:
+ shard_time += _TestTime(t, test_times, median)
+ print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests)
+ else:
+ print 'shard %d: %d tests (unknown duration)' % (i, num_tests)
+
+
+def _SplitShardsByTime(test_cases, total_shards, test_times,
+ debug_shard_distributions):
+ median = _MedianTestTime(test_times)
+ shards = []
+ for i in xrange(total_shards):
+ shards.append({'total_time': 0.0, 'tests': []})
+ test_cases.sort(key=lambda t: _TestTime(t, test_times, median),
+ reverse=True)
+
+ # The greedy algorithm has been empirically tested on the WebGL 2.0
+ # conformance tests' times, and results in an essentially perfect
+ # shard distribution of 530 seconds per shard. In the same scenario,
+ # round-robin scheduling resulted in shard times spread between 502
+ # and 592 seconds, and the current alphabetical sharding resulted in
+ # shard times spread between 44 and 1591 seconds.
+
+ # Greedy scheduling. O(m*n), where m is the number of shards and n
+ # is the number of test cases.
+ for t in test_cases:
+ min_shard_index = 0
+ min_shard_time = None
+ for i in xrange(total_shards):
+ if min_shard_time is None or shards[i]['total_time'] < min_shard_time:
+ min_shard_index = i
+ min_shard_time = shards[i]['total_time']
+ shards[min_shard_index]['tests'].append(t)
+ shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
+
+ res = [s['tests'] for s in shards]
+ if debug_shard_distributions:
+ _DebugShardDistributions(res, test_times)
+
+ return res
+
+
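
(The scheduling above is greedy longest-processing-time-first: sort descending
by expected duration, then repeatedly hand the next test to the least-loaded
shard. A self-contained illustration with hypothetical durations:)

    durations = [9, 7, 6, 5, 4, 3]  # hypothetical per-test seconds
    shards = [{'total': 0.0, 'tests': []} for _ in range(3)]
    for d in sorted(durations, reverse=True):
      lightest = min(shards, key=lambda s: s['total'])
      lightest['tests'].append(d)
      lightest['total'] += d
    # Resulting loads: 12, 11, 11 seconds, close to the ideal 34/3.
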
_TEST_GENERATOR_PREFIX = 'GenerateTestCases_'
-def LoadTests(test_class, finder_options, filter_regex_str,
- total_shards, shard_index):
+def _LoadTests(test_class, finder_options, filter_regex_str,
+ filter_tests_after_sharding,
+ total_shards, shard_index, test_times,
+ debug_shard_distributions):
test_cases = []
- filter_regex = re.compile(filter_regex_str)
+ real_regex = re.compile(filter_regex_str)
+ noop_regex = re.compile('')
+ if filter_tests_after_sharding:
+ filter_regex = noop_regex
+ post_filter_regex = real_regex
+ else:
+ filter_regex = real_regex
+ post_filter_regex = noop_regex
for name, method in inspect.getmembers(
test_class, predicate=inspect.ismethod):
if name.startswith('test'):
@@ -101,14 +178,30 @@ def LoadTests(test_class, finder_options, filter_regex_str,
name, based_method_name)
based_method = getattr(test_class, based_method_name)
for generated_test_name, args in method(finder_options):
- ValidateTestMethodname(generated_test_name)
+ _ValidateTestMethodname(generated_test_name)
if filter_regex.search(generated_test_name):
- setattr(test_class, generated_test_name, GenerateTestMethod(
+ setattr(test_class, generated_test_name, _GenerateTestMethod(
based_method, args))
test_cases.append(test_class(generated_test_name))
- test_cases.sort(key=lambda t: t.id())
- test_range = TestRangeForShard(total_shards, shard_index, len(test_cases))
- return test_cases[test_range[0]:test_range[1]]
+ if test_times:
+ # Assign tests to shards.
+ shards = _SplitShardsByTime(test_cases, total_shards, test_times,
+ debug_shard_distributions)
+ return [t for t in shards[shard_index]
+ if post_filter_regex.search(t.shortName())]
+ else:
+ test_cases.sort(key=lambda t: t.shortName())
+ test_range = _TestRangeForShard(total_shards, shard_index, len(test_cases))
+ if debug_shard_distributions:
+ tmp_shards = []
+ for i in xrange(total_shards):
+ tmp_range = _TestRangeForShard(total_shards, i, len(test_cases))
+ tmp_shards.append(test_cases[tmp_range[0]:tmp_range[1]])
+      # The code can be edited to pass 'test_times' in here, for
+      # debugging and comparison purposes.
+ _DebugShardDistributions(tmp_shards, None)
+ return [t for t in test_cases[test_range[0]:test_range[1]]
+ if post_filter_regex.search(t.shortName())]
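
(What the real/noop regex swap above buys: with --filter-tests-after-sharding,
shard boundaries are computed from the full test list, so each test lands on
the same shard as in an unfiltered run. Hypothetical example with tests
t1..t4, two shards, a filter matching only t3 and t4, and the alphabetical
sharding path for simplicity:)

    # Filter before sharding (default):
    #   filtered list [t3, t4] -> shard 0 runs [t3], shard 1 runs [t4]
    # Filter after sharding (--filter-tests-after-sharding):
    #   full list splits as shard 0 = [t1, t2], shard 1 = [t3, t4];
    #   post-filtering, shard 0 runs nothing and shard 1 runs [t3, t4],
    #   i.e. the same placement those tests get in an unfiltered run.
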
class TestRunOptions(object):
@@ -120,11 +213,21 @@ class BrowserTestResult(unittest.TextTestResult):
def __init__(self, *args, **kwargs):
super(BrowserTestResult, self).__init__(*args, **kwargs)
self.successes = []
+ self.times = {}
+ self._current_test_start_time = 0
def addSuccess(self, test):
super(BrowserTestResult, self).addSuccess(test)
self.successes.append(test)
+ def startTest(self, test):
+ super(BrowserTestResult, self).startTest(test)
+ self._current_test_start_time = time.time()
+
+ def stopTest(self, test):
+ super(BrowserTestResult, self).stopTest(test)
+ self.times[test.shortName()] = (time.time() - self._current_test_start_time)
+
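
(Usage sketch for the result class above: a runner invocation like the one
later in this file exposes per-test wall-clock durations keyed by shortName(),
which feed the 'times' map in the abbreviated JSON output. The test names
below are hypothetical:)

    results = unittest.TextTestRunner(
        resultclass=BrowserTestResult).run(suite)
    results.times  # e.g. {'testFoo': 0.42, 'testBar': 1.7}, seconds per test
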
def Run(project_config, test_run_options, args):
binary_manager.InitDependencyManager(project_config.client_configs)
@@ -140,15 +243,31 @@ def Run(project_config, test_run_options, args):
'this script is responsible for spawning all of the shards.)')
parser.add_argument('--shard-index', default=0, type=int,
help='Shard index (0..total_shards-1) of this test run.')
+ parser.add_argument(
+ '--filter-tests-after-sharding', default=False, action='store_true',
+ help=('Apply the test filter after tests are split for sharding. Useful '
+ 'for reproducing bugs related to the order in which tests run.'))
+ parser.add_argument(
+ '--read-abbreviated-json-results-from', metavar='FILENAME',
+ action='store', help=(
+ 'If specified, reads abbreviated results from that path in json form. '
+ 'The file format is that written by '
+ '--write-abbreviated-json-results-to. This information is used to more '
+ 'evenly distribute tests among shards.'))
+ parser.add_argument('--debug-shard-distributions',
+ action='store_true', default=False,
+ help='Print debugging information about the shards\' test distributions')
+
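
(For reference, the file consumed by --read-abbreviated-json-results-from has
the shape that --write-abbreviated-json-results-to emits at the bottom of this
file; test names are whatever shortName() returns, and only the 'times' map is
used for sharding. Values here are hypothetical:)

    {
      "valid": true,
      "failures": ["testBar"],
      "successes": ["testFoo", "testBaz"],
      "times": {"testFoo": 1.2, "testBar": 5.0, "testBaz": 0.3}
    }
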
option, extra_args = parser.parse_known_args(args)
for start_dir in project_config.start_dirs:
modules_to_classes = discover.DiscoverClasses(
start_dir, project_config.top_level_dir,
- base_class=serially_executed_browser_test_case.SeriallyBrowserTestCase)
+ base_class=serially_executed_browser_test_case.
+ SeriallyExecutedBrowserTestCase)
browser_test_classes = modules_to_classes.values()
- ValidateDistinctNames(browser_test_classes)
+ _ValidateDistinctNames(browser_test_classes)
test_class = None
for cl in browser_test_classes:
@@ -162,11 +281,19 @@ def Run(project_config, test_run_options, args):
cl.Name() for cl in browser_test_classes)
return 1
- options = ProcessCommandLineOptions(test_class, extra_args)
+ options = ProcessCommandLineOptions(test_class, project_config, extra_args)
+
+ test_times = None
+ if option.read_abbreviated_json_results_from:
+ with open(option.read_abbreviated_json_results_from, 'r') as f:
+ abbr_results = json.load(f)
+ test_times = abbr_results.get('times')
suite = unittest.TestSuite()
- for test in LoadTests(test_class, options, option.test_filter,
- option.total_shards, option.shard_index):
+ for test in _LoadTests(test_class, options, option.test_filter,
+ option.filter_tests_after_sharding,
+ option.total_shards, option.shard_index,
+ test_times, option.debug_shard_distributions):
suite.addTest(test)
results = unittest.TextTestRunner(
@@ -174,18 +301,28 @@ def Run(project_config, test_run_options, args):
resultclass=BrowserTestResult).run(suite)
if option.write_abbreviated_json_results_to:
with open(option.write_abbreviated_json_results_to, 'w') as f:
- json_results = {'failures': [], 'successes': [], 'valid': True}
+ json_results = {'failures': [], 'successes': [],
+ 'times': {}, 'valid': True}
# Treat failures and errors identically in the JSON
# output. Failures are those which cooperatively fail using
# Python's unittest APIs; errors are those which abort the test
      # case early with an exception.
failures = []
- failures.extend(results.failures)
- failures.extend(results.errors)
- failures.sort(key=lambda entry: entry[0].id())
- for (failed_test_case, _) in failures:
- json_results['failures'].append(failed_test_case.id())
+ for fail, _ in results.failures + results.errors:
+        # When an error is thrown in an individual test method, or in
+        # setUp or tearDown, fail is an instance of unittest.TestCase.
+ if isinstance(fail, unittest.TestCase):
+ failures.append(fail.shortName())
+ else:
+          # When an error is thrown in setUpClass or tearDownClass, an
+          # instance of _ErrorHolder is placed in the results.errors list.
+          # We use id() as the failure name in this case, since shortName()
+          # is not available.
+ failures.append(fail.id())
+ failures = sorted(list(failures))
+ for failure_id in failures:
+ json_results['failures'].append(failure_id)
for passed_test_case in results.successes:
- json_results['successes'].append(passed_test_case.id())
+ json_results['successes'].append(passed_test_case.shortName())
+ json_results['times'].update(results.times)
json.dump(json_results, f)
return len(results.failures + results.errors)
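
(Putting the new flags together, the intended workflow appears to be two
passes. 'run_browser_tests' below is a hypothetical stand-in for whatever
entry point the embedding project wires to Run():)

    # Pass 1: run once and record per-test times.
    run_browser_tests MyTests --write-abbreviated-json-results-to=times.json

    # Pass 2: shard by recorded times instead of alphabetical order.
    run_browser_tests MyTests \
        --read-abbreviated-json-results-from=times.json \
        --total-shards=4 --shard-index=0 --debug-shard-distributions
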