OLD | NEW |
1 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Dispatches content_browsertests.""" | 5 """Generate test runner factory and tests for content_browsertests.""" |
6 | 6 |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import sys | 9 import sys |
10 | 10 |
11 from pylib import android_commands | 11 from pylib import android_commands |
12 from pylib import cmd_helper | 12 from pylib import cmd_helper |
13 from pylib import constants | 13 from pylib import constants |
14 from pylib import ports | 14 from pylib import ports |
15 from pylib.base import base_test_result | 15 from pylib.base import base_test_result |
16 from pylib.base import shard | 16 from pylib.gtest import setup as gtest_setup |
17 from pylib.gtest import dispatch as gtest_dispatch | |
18 from pylib.gtest import test_runner | 17 from pylib.gtest import test_runner |
19 from pylib.utils import report_results | 18 from pylib.utils import report_results |
20 | 19 |
21 sys.path.insert(0, | 20 sys.path.insert(0, |
22 os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib')) | 21 os.path.join(constants.DIR_SOURCE_ROOT, 'build', 'util', 'lib')) |
23 from common import unittest_util | 22 from common import unittest_util |
24 | 23 |
25 | 24 |
26 def Dispatch(options): | 25 def Setup(test_arguments, timeout, cleanup_test_files, tool, build_type, |
27 """Dispatches all content_browsertests. | 26 webkit, push_deps, gtest_filter): |
| 27 """Create the test runner factory and tests. |
28 | 28 |
29 Args: | 29 Args: |
30 options: optparse.Options object containing command-line options | 30 test_arguments: Additional arguments to pass to the test binary. |
| 31 timeout: Timeout for each test. |
| 32 cleanup_test_files: Whether or not to cleanup test files on device. |
| 33 tool: Name of the Valgrind tool. |
| 34 build_type: 'Release' or 'Debug'. |
| 35 webkit: Whether the suite is being run from a WebKit checkout. |
| 36 push_deps: If True, push all dependencies to the device. |
| 37 gtest_filter: Filter for tests. |
| 38 |
31 Returns: | 39 Returns: |
32 A tuple of (base_test_result.TestRunResults object, exit code). | 40 A tuple of (TestRunnerFactory, tests). |
33 Raises: | |
34 Exception: Failed to reset the test server port. | |
35 """ | 41 """ |
36 | 42 |
37 attached_devices = [] | 43 test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type) |
38 if options.test_device: | 44 test_suite = os.path.join(test_suite_dir, 'apks', |
39 attached_devices = [options.test_device] | 45 constants.BROWSERTEST_SUITE_NAME + '.apk') |
40 else: | |
41 attached_devices = android_commands.GetAttachedDevices() | |
42 | |
43 if not attached_devices: | |
44 logging.critical('A device must be attached and online.') | |
45 return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE) | |
46 | |
47 # Reset the test port allocation. It's important to do it before starting | |
48 # to dispatch any tests. | |
49 if not ports.ResetTestServerPortAllocation(): | |
50 raise Exception('Failed to reset test server port.') | |
51 | |
52 test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), | |
53 options.build_type) | |
54 options.test_suite = os.path.join(test_suite_dir, | |
55 'apks', | |
56 constants.BROWSERTEST_SUITE_NAME + '.apk') | |
57 | 46 |
58 # Constructs a new TestRunner with the current options. | 47 # Constructs a new TestRunner with the given arguments. |
59 def RunnerFactory(device, shard_index): | 48 def TestRunnerFactory(device, shard_index): |
60 return test_runner.TestRunner( | 49 return test_runner.TestRunner( |
61 device, | 50 device, |
62 options.test_suite, | 51 test_suite, |
63 options.test_arguments, | 52 test_arguments, |
64 options.timeout, | 53 timeout, |
65 options.cleanup_test_files, | 54 cleanup_test_files, |
66 options.tool, | 55 tool, |
67 options.build_type, | 56 build_type, |
68 options.webkit, | 57 webkit, |
69 options.push_deps, | 58 push_deps, |
70 constants.BROWSERTEST_TEST_PACKAGE_NAME, | 59 constants.BROWSERTEST_TEST_PACKAGE_NAME, |
71 constants.BROWSERTEST_TEST_ACTIVITY_NAME, | 60 constants.BROWSERTEST_TEST_ACTIVITY_NAME, |
72 constants.BROWSERTEST_COMMAND_LINE_FILE) | 61 constants.BROWSERTEST_COMMAND_LINE_FILE) |
73 | 62 |
| 63 # TODO(gkanwar): This breaks the abstraction of having test_dispatcher.py deal |
| 64 # entirely with the devices. Can we do this another way? |
| 65 attached_devices = android_commands.GetAttachedDevices() |
74 # Get tests and split them up based on the number of devices. | 66 # Get tests and split them up based on the number of devices. |
75 all_enabled = gtest_dispatch.GetAllEnabledTests(RunnerFactory, | 67 all_enabled = gtest_setup.GetAllEnabledTests(TestRunnerFactory, |
76 attached_devices) | 68 attached_devices) |
77 if options.test_filter: | 69 if gtest_filter: |
78 all_tests = unittest_util.FilterTestNames(all_enabled, | 70 all_tests = unittest_util.FilterTestNames(all_enabled, |
79 options.test_filter) | 71 gtest_filter) |
80 else: | 72 else: |
81 all_tests = _FilterTests(all_enabled) | 73 all_tests = _FilterTests(all_enabled) |
82 | 74 |
83 # Run tests. | 75 return (TestRunnerFactory, all_tests) |
84 # TODO(nileshagrawal): remove this abnormally long setup timeout once fewer | |
85 # files are pushed to the devices for content_browsertests: crbug.com/138275 | |
86 setup_timeout = 20 * 60 # 20 minutes | |
87 test_results, exit_code = shard.ShardAndRunTests( | |
88 RunnerFactory, attached_devices, all_tests, options.build_type, | |
89 setup_timeout=setup_timeout, test_timeout=None, | |
90 num_retries=options.num_retries) | |
91 report_results.LogFull( | |
92 results=test_results, | |
93 test_type='Unit test', | |
94 test_package=constants.BROWSERTEST_SUITE_NAME, | |
95 build_type=options.build_type, | |
96 flakiness_server=options.flakiness_dashboard_server) | |
97 | |
98 return (test_results, exit_code) | |
99 | 76 |
100 | 77 |
101 def _FilterTests(all_enabled_tests): | 78 def _FilterTests(all_enabled_tests): |
102 """Filters out tests and fixtures starting with PRE_ and MANUAL_.""" | 79 """Filters out tests and fixtures starting with PRE_ and MANUAL_.""" |
103 return [t for t in all_enabled_tests if _ShouldRunOnBot(t)] | 80 return [t for t in all_enabled_tests if _ShouldRunOnBot(t)] |
104 | 81 |
105 | 82 |
106 def _ShouldRunOnBot(test): | 83 def _ShouldRunOnBot(test): |
107 fixture, case = test.split('.', 1) | 84 fixture, case = test.split('.', 1) |
108 if _StartsWith(fixture, case, 'PRE_'): | 85 if _StartsWith(fixture, case, 'PRE_'): |
109 return False | 86 return False |
110 if _StartsWith(fixture, case, 'MANUAL_'): | 87 if _StartsWith(fixture, case, 'MANUAL_'): |
111 return False | 88 return False |
112 return True | 89 return True |
113 | 90 |
114 | 91 |
115 def _StartsWith(a, b, prefix): | 92 def _StartsWith(a, b, prefix): |
116 return a.startswith(prefix) or b.startswith(prefix) | 93 return a.startswith(prefix) or b.startswith(prefix) |
OLD | NEW |