Chromium Code Reviews| Index: build/android/pylib/base/dispatch.py |
| diff --git a/build/android/pylib/base/shard.py b/build/android/pylib/base/dispatch.py |
| similarity index 63% |
| rename from build/android/pylib/base/shard.py |
| rename to build/android/pylib/base/dispatch.py |
| index 8c429f7d3d01a9df148d25089260e9f085f50724..97473173c855e533689acc7904f7efcc77dda8c6 100644 |
| --- a/build/android/pylib/base/shard.py |
| +++ b/build/android/pylib/base/dispatch.py |
| @@ -118,7 +118,7 @@ class _TestCollection(object): |
| def _RunTestsFromQueue(runner, test_collection, out_results, watcher, |
| - num_retries): |
| + num_retries, tag_results_with_device=False): |
| """Runs tests from the test_collection until empty using the given runner. |
| Adds TestRunResults objects to the out_results list and may add tests to the |
| @@ -130,7 +130,22 @@ def _RunTestsFromQueue(runner, test_collection, out_results, watcher, |
| out_results: A list to add TestRunResults to. |
| watcher: A watchdog_timer.WatchdogTimer object, used as a shared timeout. |
| num_retries: Number of retries for a test. |
| + tag_results_with_device: If True, appends the name of the device on which |
| + the test was run to the test name. Used by ReplicateAndRunTests to |
| + identify which device ran each copy of the test, and to ensure each copy |
| + of the test is recorded separately. |
| """ |
| + |
| + # Used to tag all results to identify which device caused failing tests |
| + def TagTestRunResults(test_run_results): |
| + new_test_run_results = base_test_result.TestRunResults() |
| + for test_result in test_run_results.GetAll(): |
| + new_result = base_test_result.BaseTestResult( |
| + '%s_%s' % (runner.device, test_result.GetName()), |
| + test_result.GetType(), test_result.GetLog()) |
| + new_test_run_results.AddResult(new_result) |
| + return new_test_run_results |
| + |
| for test in test_collection: |
| watcher.Reset() |
| try: |
| @@ -140,10 +155,14 @@ def _RunTestsFromQueue(runner, test_collection, out_results, watcher, |
| logging.warning(msg) |
| raise android_commands.errors.DeviceUnresponsiveError(msg) |
| result, retry = runner.RunTest(test.test) |
| + if tag_results_with_device: |
| + result = TagTestRunResults(result) |
| test.tries += 1 |
| if retry and test.tries <= num_retries: |
| # Retry non-passing results, only record passing results. |
| pass_results = base_test_result.TestRunResults() |
| + # Tag all results with the device, so we can identify the failing device |
| + # for replicated tests. |
| pass_results.AddResults(result.GetPass()) |
| out_results.append(pass_results) |
| logging.warning('Will retry test, try #%s.' % test.tries) |
| @@ -184,28 +203,34 @@ def _SetUp(runner_factory, device, out_runners, threadsafe_counter): |
| logging.warning('Failed to create shard for %s: [%s]', device, e) |
| -def _RunAllTests(runners, tests, num_retries, timeout=None): |
| +def _RunAllTests(runners, test_collection_factory, num_retries, timeout=None, |
| + tag_results_with_device=False): |
| """Run all tests using the given TestRunners. |
| Args: |
| runners: a list of TestRunner objects. |
| - tests: a list of Tests to run using the given TestRunners. |
| + test_collection_factory: a callable to generate a _TestCollection object for |
| + each test runner. |
| num_retries: number of retries for a test. |
| timeout: watchdog timeout in seconds, defaults to the default timeout. |
| + tag_results_with_device: If True, appends the name of the device on which |
| + the test was run to the test name. Used by ReplicateAndRunTests to |
| + identify which device ran each copy of the test, and to ensure each copy |
| + of the test is recorded separately. |
| Returns: |
| A tuple of (TestRunResults object, exit code) |
| """ |
| - logging.warning('Running %s tests with %s test runners.' % |
| - (len(tests), len(runners))) |
| - tests_collection = _TestCollection([_Test(t) for t in tests]) |
| + logging.warning('Running tests with %s test runners.' % (len(runners))) |
| results = [] |
| exit_code = 0 |
| watcher = watchdog_timer.WatchdogTimer(timeout) |
| + |
| workers = reraiser_thread.ReraiserThreadGroup( |
| [reraiser_thread.ReraiserThread( |
| _RunTestsFromQueue, |
| - [r, tests_collection, results, watcher, num_retries], |
| + [r, test_collection_factory(), results, watcher, num_retries, |
| + tag_results_with_device], |
| name=r.device[-4:]) |
| for r in runners]) |
| run_results = base_test_result.TestRunResults() |
| @@ -267,36 +292,124 @@ def _TearDownRunners(runners, timeout=None): |
| threads.JoinAll(watchdog_timer.WatchdogTimer(timeout)) |
| -def ShardAndRunTests(runner_factory, devices, tests, build_type='Debug', |
| - test_timeout=DEFAULT_TIMEOUT, |
| - setup_timeout=DEFAULT_TIMEOUT, |
| - num_retries=2): |
| + |
| +def _GetAttachedDevices(wait_for_debugger=False, test_device=None): |
| + """Get all attached devices. |
| + |
| + If we are using a debugger, limit to only one device. |
| + |
| + Args: |
| + wait_for_debugger: True if this run will use a debugger. |
| + test_device: name of a specific device to use. |
| + |
| + Returns: |
| + A list of attached devices. |
| + """ |
| + attached_devices = [] |
| + |
| + attached_devices = android_commands.GetAttachedDevices() |
| + if test_device: |
| + assert test_device in attached_devices |
| + attached_devices = [test_device] |
| + |
| + if len(attached_devices) > 1 and wait_for_debugger: |
| + logging.warning('Debugger can not be sharded, using first available device') |
| + attached_devices = attached_devices[:1] |
| + |
| + return attached_devices |
| + |
| + |
| +def ReplicateAndRunTests(tests, wait_for_debugger, test_device, |
| + *args, **kwargs): |
| + """Replicates the tests for each device, so all devices run every test. |
| + |
| + Args: |
| + tests: A list of tests to run. |
| + wait_for_debugger: True if this test is using a debugger. |
| + test_device: A specific device to run tests on, or None. |
| + *args, **kwargs: Args and kwargs to RunTests which we pass through. |
| + |
| + Returns: |
| + A tuple of (base_test_result.TestRunResults object, exit code). |
| + """ |
| + |
| + if not tests: |
| + logging.error('No tests to run.') |
| + return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE) |
| + |
| + logging.info('Will run %d tests: %s', len(tests), str(tests)) |
| + |
| + # Generate a unique _TestCollection object for each test runner, but use |
| + # the same set of tests. |
| + TestCollectionFactory = lambda: _TestCollection([_Test(t) for t in tests]) |
| + |
| + devices = _GetAttachedDevices(wait_for_debugger, test_device) |
| + return _RunTests(TestCollectionFactory, devices, *args, |
| + tag_results_with_device=True, **kwargs) |
| + |
| + |
| +def ShardAndRunTests(tests, wait_for_debugger, test_device, *args, **kwargs): |
| + """Distributes all tests over devices through a shared pool of tests. |
|
frankf
2013/07/16 00:02:18
It's sufficient to say "Shards tests over devices"
gkanwar
2013/07/16 00:47:03
Done.
|
| + |
| + Args: |
| + tests: A list of tests to run. |
| + wait_for_debugger: True if this test is using a debugger. |
| + test_device: A specific device to run tests on, or None. |
| + *args, **kwargs: Args and kwargs to _RunTests which we pass through. |
| + |
| + Returns: |
| + A tuple of (base_test_result.TestRunResults object, exit code). |
| + """ |
| + |
| + if not tests: |
|
frankf
2013/07/16 00:02:18
There's a lot of duplication between these two methods
gkanwar
2013/07/16 00:47:03
I ended up combining the two methods back together
|
| + logging.error('No tests to run.') |
| + return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE) |
| + |
| + logging.info('Will run %d tests: %s', len(tests), str(tests)) |
| + |
| + # Generate a shared _TestCollection object for all test runners, so they draw |
| + # from a common pool of tests. |
| + shared_test_collection = _TestCollection([_Test(t) for t in tests]) |
| + TestCollectionFactory = lambda: shared_test_collection |
| + |
| + devices = _GetAttachedDevices(wait_for_debugger, test_device) |
| + return _RunTests(TestCollectionFactory, devices, *args, |
| + tag_results_with_device=False, **kwargs) |
| + |
| + |
| +def _RunTests(test_collection_factory, devices, runner_factory, |
|
frankf
2013/07/16 00:02:18
It's convention to move callee above caller
gkanwar
2013/07/16 00:47:03
Done.
|
| + build_type='Debug', |
| + test_timeout=DEFAULT_TIMEOUT, |
| + setup_timeout=DEFAULT_TIMEOUT, |
| + num_retries=2, |
| + tag_results_with_device=False): |
| """Run all tests on attached devices, retrying tests that don't pass. |
| Args: |
| - runner_factory: callable that takes a device and index and returns a |
| - TestRunner object. |
| + test_collection_factory: callable that is used to generate a _TestCollection |
| + object for each test runner. |
| devices: list of attached device serial numbers as strings. |
| - tests: list of tests to run. |
| build_type: either 'Debug' or 'Release'. |
| + runner_factory: callable that takes a device and index and returns a |
| + TestRunner object. |
| test_timeout: watchdog timeout in seconds for running tests, defaults to the |
| - default timeout. |
| + default timeout. |
| setup_timeout: watchdog timeout in seconds for creating and cleaning up |
| - test runners, defaults to the default timeout. |
| + test runners, defaults to the default timeout. |
| num_retries: number of retries for a test. |
| + tag_results_with_device: If True, appends the name of the device on which |
| + the test was run to the test name. Used by ReplicateAndRunTests to |
| + identify which device ran each copy of the test, and to ensure each copy |
| + of the test is recorded separately. |
| Returns: |
| A tuple of (base_test_result.TestRunResults object, exit code). |
| """ |
| - if not tests: |
| - logging.error('No tests to run.') |
| - return (base_test_result.TestRunResults(), constants.ERROR_EXIT_CODE) |
| - |
| - logging.info('Will run %d tests: %s', len(tests), str(tests)) |
| forwarder.Forwarder.KillHost(build_type) |
| runners = _CreateRunners(runner_factory, devices, setup_timeout) |
| try: |
| - return _RunAllTests(runners, tests, num_retries, test_timeout) |
| + return _RunAllTests(runners, test_collection_factory, |
| + num_retries, test_timeout, tag_results_with_device) |
| finally: |
| try: |
| _TearDownRunners(runners, setup_timeout) |