Index: telemetry/telemetry/testing/run_browser_tests.py
diff --git a/telemetry/telemetry/testing/browser_test_runner.py b/telemetry/telemetry/testing/run_browser_tests.py
similarity index 57%
copy from telemetry/telemetry/testing/browser_test_runner.py
copy to telemetry/telemetry/testing/run_browser_tests.py
index 7c8ed1b51eda4857cebddd3e94439e4b2e22c39f..6ce835aa1c583d4623b4735fd9abfe31ffe59a62 100644
--- a/telemetry/telemetry/testing/browser_test_runner.py
+++ b/telemetry/telemetry/testing/run_browser_tests.py
@@ -2,39 +2,40 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
-import argparse
-import json
-import logging
+import fnmatch
 import re
-import time
-import unittest
+import sys
+import json
 from telemetry.core import discover
 from telemetry.internal.browser import browser_options
+from telemetry.internal.platform import android_device
 from telemetry.internal.util import binary_manager
-from telemetry.testing import options_for_unittests
+from telemetry.testing import browser_test_context
 from telemetry.testing import serially_executed_browser_test_case
+import typ
+from typ import arg_parser
+
 DEFAULT_LOG_FORMAT = (
     '(%(levelname)s) %(asctime)s %(module)s.%(funcName)s:%(lineno)d '
    '%(message)s')
-def ProcessCommandLineOptions(test_class, project_config, args):
+TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']
+
+
+def ProcessCommandLineOptions(test_class, default_chrome_root, args):
   options = browser_options.BrowserFinderOptions()
   options.browser_type = 'any'
   parser = options.CreateParser(test_class.__doc__)
   test_class.AddCommandlineArgs(parser)
   # Set the default chrome root variable. This is required for the
   # Android browser finder to function properly.
-  parser.set_defaults(chrome_root=project_config.default_chrome_root)
+  if default_chrome_root:
+    parser.set_defaults(chrome_root=default_chrome_root)
   finder_options, positional_args = parser.parse_args(args)
   finder_options.positional_args = positional_args
-  options_for_unittests.Push(finder_options)
-  # Use this to signal serially_executed_browser_test_case.LoadAllTestsInModule
-  # not to load tests in cases it's not invoked by browser_test_runner
-  # framework.
-  finder_options.browser_test_runner_running = True
   return finder_options
@@ -144,10 +145,9 @@ def _SplitShardsByTime(test_cases, total_shards, test_times,
   return res
-def _LoadTests(test_class, finder_options, filter_regex_str,
-               filter_tests_after_sharding,
-               total_shards, shard_index, test_times,
-               debug_shard_distributions):
+def LoadTestCasesToBeRun(
+    test_class, finder_options, filter_regex_str, filter_tests_after_sharding,
+    total_shards, shard_index, test_times, debug_shard_distributions):
   test_cases = []
   real_regex = re.compile(filter_regex_str)
   noop_regex = re.compile('')
@@ -184,50 +184,11 @@ def _LoadTests(test_class, finder_options, filter_regex_str,
           if post_filter_regex.search(t.shortName())]
-class TestRunOptions(object):
-  def __init__(self):
-    self.verbosity = 2
-
-
-class BrowserTestResult(unittest.TextTestResult):
-  def __init__(self, *args, **kwargs):
-    super(BrowserTestResult, self).__init__(*args, **kwargs)
-    self.successes = []
-    self.times = {}
-    self._current_test_start_time = 0
-
-  def addSuccess(self, test):
-    super(BrowserTestResult, self).addSuccess(test)
-    self.successes.append(test)
-
-  def startTest(self, test):
-    super(BrowserTestResult, self).startTest(test)
-    self._current_test_start_time = time.time()
-
-  def stopTest(self, test):
-    super(BrowserTestResult, self).stopTest(test)
-    self.times[test.shortName()] = (time.time() - self._current_test_start_time)
-
-
-def Run(project_config, test_run_options, args, **log_config_kwargs):
-  # the log level is set in browser_options
-  log_config_kwargs.pop('level', None)
-  log_config_kwargs.setdefault('format', DEFAULT_LOG_FORMAT)
-  logging.basicConfig(**log_config_kwargs)
-
-  binary_manager.InitDependencyManager(project_config.client_configs)
-  parser = argparse.ArgumentParser(description='Run a browser test suite')
+def _CreateTestArgParsers():
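+  # Reuse typ's argument parser (with its reporting and running flag groups)
+  # so those flags are accepted alongside the harness-specific flags below.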
+  parser = typ.ArgumentParser(discovery=False, reporting=True, running=True)
   parser.add_argument('test', type=str, help='Name of the test suite to run')
-  parser.add_argument(
-      '--write-abbreviated-json-results-to', metavar='FILENAME', action='store',
-      help=('If specified, writes the full results to that path in json form.'))
   parser.add_argument('--test-filter', type=str, default='', action='store',
       help='Run only tests whose names match the given filter regexp.')
-  parser.add_argument('--total-shards', default=1, type=int,
-      help='Total number of shards being used for this test run. (The user of '
-      'this script is responsible for spawning all of the shards.)')
-  parser.add_argument('--shard-index', default=0, type=int,
-      help='Shard index (0..total_shards-1) of this test run.')
   parser.add_argument(
       '--filter-tests-after-sharding', default=False, action='store_true',
       help=('Apply the test filter after tests are split for sharding. Useful '
@@ -236,18 +197,56 @@ def Run(project_config, test_run_options, args, **log_config_kwargs):
       '--read-abbreviated-json-results-from', metavar='FILENAME',
       action='store', help=(
           'If specified, reads abbreviated results from that path in json form. '
-          'The file format is that written by '
-          '--write-abbreviated-json-results-to. This information is used to more '
-          'evenly distribute tests among shards.'))
+          'This information is used to more evenly distribute tests among '
+          'shards.'))
   parser.add_argument('--debug-shard-distributions',
       action='store_true', default=False,
       help='Print debugging information about the shards\' test distributions')
-  option, extra_args = parser.parse_known_args(args)
-
-  for start_dir in project_config.start_dirs:
+  parser.add_argument('--default-chrome-root', type=str, default=None)
+  parser.add_argument('--client-config', dest='client_configs',
+                      action='append', default=[])
+  parser.add_argument('--start-dir', dest='start_dirs',
+                      action='append', default=[])
+  parser.add_argument('--skip', metavar='glob', default=[],
+                      action='append',
+                      help=('Globs of test names to skip (defaults to %(default)s).'))
+  return parser
+
+
+def _SkipMatch(name, skipGlobs):
+  return any(fnmatch.fnmatch(name, glob) for glob in skipGlobs)
+
+
+def _GetClassifier(args):
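+  # typ calls the classifier for every test it discovers to decide whether
+  # the test is run, skipped, or left out of the run entirely.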
+  def _SeriallyExecutedBrowserTestCaseClassifier(test_set, test):
+    # Do not pick up tests that do not inherit from
+    # serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase
+    # class.
+    if not isinstance(test,
+        serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
+      return
+    name = test.id()
+    if _SkipMatch(name, args.skip):
+      test_set.tests_to_skip.append(
+          typ.TestInput(name, 'skipped because matched --skip'))
+      return
+    # For now, only support running these tests serially.
+    test_set.isolated_tests.append(typ.TestInput(name))
+  return _SeriallyExecutedBrowserTestCaseClassifier
+
+
+def RunTests(args):
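+  # Harness entry point: parse the arguments, locate the requested test
+  # class, pick this shard's test cases, and hand them to a typ runner.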
+  parser = _CreateTestArgParsers()
+  try:
+    options, extra_args = parser.parse_known_args(args)
+  except arg_parser._Bailout:
+    return parser.exit_status
+  binary_manager.InitDependencyManager(options.client_configs)
+
+  for start_dir in options.start_dirs:
     modules_to_classes = discover.DiscoverClasses(
-        start_dir, project_config.top_level_dir,
+        start_dir, options.top_level_dir,
         base_class=serially_executed_browser_test_case.
         SeriallyExecutedBrowserTestCase)
     browser_test_classes = modules_to_classes.values()
@@ -256,58 +255,103 @@ def Run(project_config, test_run_options, args, **log_config_kwargs):
   test_class = None
   for cl in browser_test_classes:
-    if cl.Name() == option.test:
+    if cl.Name() == options.test:
       test_class = cl
       break
   if not test_class:
-    print 'Cannot find test class with name matching %s' % option.test
+    print 'Cannot find test class with name matching %s' % options.test
     print 'Available tests: %s' % '\n'.join(
         cl.Name() for cl in browser_test_classes)
     return 1
-  options = ProcessCommandLineOptions(test_class, project_config, extra_args)
-
+  # Create test context.
+  context = browser_test_context.TypTestContext()
+  for c in options.client_configs:
+    context.client_configs.append(c)
+  context.finder_options = ProcessCommandLineOptions(
+      test_class, options.default_chrome_root, extra_args)
+  context.test_class = test_class
   test_times = None
-  if option.read_abbreviated_json_results_from:
-    with open(option.read_abbreviated_json_results_from, 'r') as f:
+  if options.read_abbreviated_json_results_from:
+    with open(options.read_abbreviated_json_results_from, 'r') as f:
       abbr_results = json.load(f)
       test_times = abbr_results.get('times')
-
-  suite = unittest.TestSuite()
-  for test in _LoadTests(test_class, options, option.test_filter,
-                         option.filter_tests_after_sharding,
-                         option.total_shards, option.shard_index,
-                         test_times, option.debug_shard_distributions):
-    suite.addTest(test)
-
-  results = unittest.TextTestRunner(
-      verbosity=test_run_options.verbosity,
-      resultclass=BrowserTestResult).run(suite)
-  if option.write_abbreviated_json_results_to:
-    with open(option.write_abbreviated_json_results_to, 'w') as f:
-      json_results = {'failures': [], 'successes': [],
-                      'times': {}, 'valid': True}
-      # Treat failures and errors identically in the JSON
-      # output. Failures are those which cooperatively fail using
-      # Python's unittest APIs; errors are those which abort the test
-      # case early with an execption.
-      failures = []
-      for fail, _ in results.failures + results.errors:
-        # When errors in thrown in individual test method or setUp or tearDown,
-        # fail would be an instance of unittest.TestCase.
-        if isinstance(fail, unittest.TestCase):
-          failures.append(fail.shortName())
-        else:
-          # When errors in thrown in setupClass or tearDownClass, an instance of
-          # _ErrorHolder is is placed in results.errors list. We use the id()
-          # as failure name in this case since shortName() is not available.
-          failures.append(fail.id())
-      failures = sorted(list(failures))
-      for failure_id in failures:
-        json_results['failures'].append(failure_id)
-      for passed_test_case in results.successes:
-        json_results['successes'].append(passed_test_case.shortName())
-      json_results['times'].update(results.times)
-      json.dump(json_results, f)
-  return len(results.failures + results.errors)
+  tests_to_run = LoadTestCasesToBeRun(
+      test_class=test_class, finder_options=context.finder_options,
+      filter_regex_str=options.test_filter,
+      filter_tests_after_sharding=options.filter_tests_after_sharding,
+      total_shards=options.total_shards, shard_index=options.shard_index,
+      test_times=test_times,
+      debug_shard_distributions=options.debug_shard_distributions)
+  for t in tests_to_run:
+    context.test_case_ids_to_run.add(t.id())
+  context.Freeze()
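+  # Publish the frozen context; _SetUpProcess re-exports it in every worker
+  # process so the test class and its test cases can read it back from
+  # browser_test_context.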
+  browser_test_context._global_test_context = context
+
+  # Setup typ runner.
+  runner = typ.Runner()
+
+  runner.context = context
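+  # _SetUpProcess and _TearDownProcess run once in each typ worker process,
+  # bracketing the tests that the worker executes.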
+  runner.setup_fn = _SetUpProcess
+  runner.teardown_fn = _TearDownProcess
+
+  runner.args.jobs = options.jobs
+  runner.args.metadata = options.metadata
+  runner.args.passthrough = options.passthrough
+  runner.args.path = options.path
+  runner.args.retry_limit = options.retry_limit
+  runner.args.test_results_server = options.test_results_server
+  runner.args.test_type = options.test_type
+  runner.args.top_level_dir = options.top_level_dir
+  runner.args.write_full_results_to = options.write_full_results_to
+  runner.args.write_trace_to = options.write_trace_to
+  runner.args.list_only = options.list_only
+  runner.classifier = _GetClassifier(options)
+
+  runner.args.suffixes = TEST_SUFFIXES
+
+  # Sharding is handled by the browser_test_runner harness, which passes
+  # browser_test_context.test_case_ids_to_run to each subprocess to indicate
+  # which test cases to run, so typ's own sharding logic is explicitly
+  # disabled here.
+  runner.args.total_shards = 1
+  runner.args.shard_index = 0
+
+  runner.args.timing = True
+  runner.args.verbose = options.verbose
+  runner.win_multiprocessing = typ.WinMultiprocessing.importable
+  try:
+    ret, _, _ = runner.run()
+  except KeyboardInterrupt:
+    print >> sys.stderr, "interrupted, exiting"
+    ret = 130
+  return ret
+
+
+def _SetUpProcess(child, context):
+  args = context.finder_options
+  if binary_manager.NeedsInit():
+    # On windows, typ doesn't keep the DependencyManager initialization in the
+    # child processes.
+    binary_manager.InitDependencyManager(context.client_configs)
+  if args.remote_platform_options.device == 'android':
+    android_devices = android_device.FindAllAvailableDevices(args)
+    if not android_devices:
+      raise RuntimeError("No Android device found")
+    android_devices.sort(key=lambda device: device.name)
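+    # Give each typ worker its own device, chosen deterministically from the
+    # sorted device list by worker number.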
+    args.remote_platform_options.device = (
+        android_devices[child.worker_num-1].guid)
+  browser_test_context._global_test_context = context
+  context.test_class.SetUpProcess()
+
+
+def _TearDownProcess(child, context):
+  del child, context # Unused.
+  browser_test_context._global_test_context.test_class.TearDownProcess()
+  browser_test_context._global_test_context = None
+
+
+if __name__ == '__main__':
+  ret_code = RunTests(sys.argv[1:])
+  sys.exit(ret_code)