| Index: build/android/pylib/host_driven/python_test_sharder.py
|
| diff --git a/build/android/pylib/host_driven/python_test_sharder.py b/build/android/pylib/host_driven/python_test_sharder.py
|
| index cc746878ef3ba6acb7476b0dd28cc33723a353a7..ec7a28eea2decf336b1631c8dc2f994e8679683b 100644
|
| --- a/build/android/pylib/host_driven/python_test_sharder.py
|
| +++ b/build/android/pylib/host_driven/python_test_sharder.py
|
| @@ -4,45 +4,46 @@
|
|
|
| """Takes care of sharding the python-drive tests in multiple devices."""
|
|
|
| +# TODO(gkanwar): Rename to python_test_runner.py
|
| +
|
| import copy
|
| import logging
|
| import multiprocessing
|
|
|
| from pylib.base import base_test_result
|
| +from pylib.base import base_test_runner
|
| from pylib.base import sharded_tests_queue
|
| -from pylib.forwarder import Forwarder
|
| -
|
| -from python_test_caller import CallPythonTest
|
| -
|
| +from pylib.instrumentation import test_result
|
|
|
| -def SetTestsContainer(tests_container):
|
| - """Sets PythonTestSharder as a top-level field.
|
| +import python_test_base
|
|
|
| - PythonTestSharder uses multiprocessing.Pool, which creates a pool of
|
| - processes. This is used to initialize each worker in the pool, ensuring that
|
| - each worker has access to this shared pool of tests.
|
|
|
| - The multiprocessing module requires that this be a top-level method.
|
| -
|
| - Args:
|
| - tests_container: the container for all the tests.
|
| - """
|
| - PythonTestSharder.tests_container = tests_container
|
| +class PythonExceptionTestResult(test_result.InstrumentationTestResult):
|
| + """Helper class for creating a test result from python exception."""
|
|
|
| + def __init__(self, test_name, start_date_ms, exc_info):
|
| + """Constructs an PythonExceptionTestResult object.
|
|
|
| -def _DefaultRunnable(test_runner):
|
| - """A default runnable for a PythonTestRunner.
|
| -
|
| - Args:
|
| - test_runner: A PythonTestRunner which will run tests.
|
| + Args:
|
| + test_name: name of the test which raised an exception.
|
| + start_date_ms: the starting time for the test.
|
| + exc_info: exception info, ostensibly from sys.exc_info().
|
| + """
|
| + exc_type, exc_value, exc_traceback = exc_info
|
| + trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
|
| + exc_traceback))
|
| + log_msg = 'Exception:\n' + trace_info
|
| + duration_ms = (int(time.time()) * 1000) - start_date_ms
|
|
|
| - Returns:
|
| - The test results.
|
| - """
|
| - return test_runner.RunTests()
|
| + super(PythonExceptionTestResult, self).__init__(
|
| + 'PythonWrapper#' + test_name,
|
| + base_test_result.ResultType.FAIL,
|
| + start_date_ms,
|
| + duration_ms,
|
| + log=str(exc_type) + ' ' + log_msg)
|
|
|
|
|
| -class PythonTestRunner(object):
|
| +class PythonTestRunner(base_test_runner.BaseTestRunner):
|
| """Thin wrapper around a list of PythonTestBase instances.
|
|
|
| This is meant to be a long-lived object which can run multiple Python tests
|
| @@ -52,152 +53,56 @@ class PythonTestRunner(object):
|
| DEFAULT_PORT + shard_index) if the test so wishes.
|
| """
|
|
|
| - def __init__(self, options):
|
| - """Constructor.
|
| -
|
| - Args:
|
| - options: Options to use for setting up tests.
|
| - """
|
| - self.options = options
|
| -
|
| - def RunTests(self):
|
| - """Runs tests from the shared pool of tests, aggregating results.
|
| -
|
| - Returns:
|
| - A list of test results for all of the tests which this runner executed.
|
| - """
|
| - tests = PythonTestSharder.tests_container
|
| -
|
| - results = base_test_result.TestRunResults()
|
| - for t in tests:
|
| - results.AddTestRunResults(CallPythonTest(t, self.options))
|
| - return results
|
| -
|
| -
|
| -class PythonTestSharder(object):
|
| - """Runs Python tests in parallel on multiple devices.
|
| -
|
| - This is lifted more or less wholesale from BaseTestRunner.
|
| -
|
| - Under the covers, it creates a pool of long-lived PythonTestRunners, which
|
| - execute tests from the pool of tests.
|
| -
|
| - Args:
|
| - attached_devices: a list of device IDs attached to the host.
|
| - available_tests: a list of tests to run which subclass PythonTestBase.
|
| - options: Options to use for setting up tests.
|
| -
|
| - Returns:
|
| - An aggregated list of test results.
|
| - """
|
| - tests_container = None
|
| -
|
| - def __init__(self, attached_devices, available_tests, options):
|
| - self.options = options
|
| - self.attached_devices = attached_devices
|
| - self.retries = options.shard_retries
|
| - self.tests = available_tests
|
| + #override
|
| + def __init__(self, options, device, shard_index):
|
| + """Create a new PythonTestRunner
|
|
|
| - def _SetupSharding(self, tests):
|
| - """Creates the shared pool of tests and makes it available to test runners.
|
| + This is a thin wrapper around the instrumentation TestRunner, since this
|
| + test runner essentially does the same things as the instrumentation test
|
| + runner with slight changes.
|
|
|
| Args:
|
| - tests: the list of tests which will be consumed by workers.
|
| + options: An optparse.Options object requiring the following attributes
|
| + (list pulled from pylib/instrumentation/test_runner.py):
|
| + - build_type: 'Release' or 'Debug'.
|
| + - install_apk: Re-installs the apk if opted.
|
| + - save_perf_json: Whether or not to save the JSON file from UI perf
|
| + tests.
|
| + - screenshot_failures: Take a screenshot for a test failure.
|
| + - tool: Name of the Valgrind tool.
|
| + - wait_for_debugger: blocks until the debugger is connected.
|
| + - disable_assertions: Whether to disable java assertions on the device.
|
| + - push_deps: If True, push all dependencies to the device.
|
| + device: Attached android device.
|
| + shard_index: Shard index.
|
| + test_package: A TestPackage object, or None if no test package needs to be
|
| + installed.
|
| """
|
| - SetTestsContainer(sharded_tests_queue.ShardedTestsQueue(
|
| - len(self.attached_devices), tests))
|
| -
|
| - def RunShardedTests(self):
|
| - """Runs tests in parallel using a pool of workers.
|
| + super(PythonTestRunner, self).__init__(
|
| + device, options.tool, options.build_type, options.push_deps)
|
| + self.options = options
|
| + self.options.shard_index = shard_index
|
| + self.options.device_id = device
|
|
|
| - Returns:
|
| - A list of test results aggregated from all test runs.
|
| - """
|
| - logging.warning('*' * 80)
|
| - logging.warning('Sharding in ' + str(len(self.attached_devices)) +
|
| - ' devices.')
|
| - logging.warning('Note that the output is not synchronized.')
|
| - logging.warning('Look for the "Final result" banner in the end.')
|
| - logging.warning('*' * 80)
|
| - final_results = base_test_result.TestRunResults()
|
| - tests_to_run = self.tests
|
| -
|
| - Forwarder.KillHost()
|
| -
|
| - for retry in xrange(self.retries):
|
| - logging.warning('Try %d of %d', retry + 1, self.retries)
|
| - self._SetupSharding(self.tests)
|
| - test_runners = self._MakeTestRunners(self.attached_devices)
|
| - logging.warning('Starting...')
|
| - pool = multiprocessing.Pool(len(self.attached_devices),
|
| - SetTestsContainer,
|
| - [PythonTestSharder.tests_container])
|
| -
|
| - # List of TestRunResults objects from each test execution.
|
| - try:
|
| - results_lists = pool.map(_DefaultRunnable, test_runners)
|
| - except Exception:
|
| - logging.exception('Unable to run tests. Something with the '
|
| - 'PythonTestRunners has gone wrong.')
|
| - raise Exception('PythonTestRunners were unable to run tests.')
|
| -
|
| - test_results = base_test_result.TestRunResults()
|
| - for t in results_lists:
|
| - test_results.AddTestRunResults(t)
|
| - # Accumulate passing results.
|
| - final_results.AddResults(test_results.GetPass())
|
| - # If we have failed tests, map them to tests to retry.
|
| - failed_tests = [t.GetName() for t in test_results.GetNotPass()]
|
| - tests_to_run = self._GetTestsToRetry(self.tests, failed_tests)
|
| -
|
| - # Bail out early if we have no more tests. This can happen if all tests
|
| - # pass before we're out of retries, for example.
|
| - if not tests_to_run:
|
| - break
|
| -
|
| - # all_passed has accumulated all passing test results.
|
| - # test_results will have the results from the most recent run, which could
|
| - # include a variety of failure modes (unknown, crashed, failed, etc).
|
| - test_results.AddResults(final_results.GetPass())
|
| - final_results = test_results
|
| -
|
| - return final_results
|
| -
|
| - def _MakeTestRunners(self, attached_devices):
|
| - """Initialize and return a list of PythonTestRunners.
|
| + #override
|
| + def RunTest(self, test):
|
| + """Sets up and runs a test case.
|
|
|
| Args:
|
| - attached_devices: list of device IDs attached to host.
|
| + test: an object which is ostensibly a subclass of PythonTestBase.
|
|
|
| Returns:
|
| - A list of PythonTestRunners, one for each device.
|
| + A TestRunResults object which contains any results produced by the test
|
| + or, in the case of a Python exception, the Python exception info, and
|
| + which tests to retry or None.
|
| """
|
| - test_runners = []
|
| - for index, device in enumerate(attached_devices):
|
| - logging.warning('*' * 80)
|
| - logging.warning('Creating shard %d for %s', index, device)
|
| - logging.warning('*' * 80)
|
| - # Bind the PythonTestRunner to a device & shard index. Give it the
|
| - # runnable which it will use to actually execute the tests.
|
| - test_options = copy.deepcopy(self.options)
|
| - test_options.ensure_value('device_id', device)
|
| - test_options.ensure_value('shard_index', index)
|
| - test_runner = PythonTestRunner(test_options)
|
| - test_runners.append(test_runner)
|
| -
|
| - return test_runners
|
| -
|
| - def _GetTestsToRetry(self, available_tests, failed_test_names):
|
| - """Infers a list of tests to retry from failed tests and available tests.
|
|
|
| - Args:
|
| - available_tests: a list of tests which subclass PythonTestBase.
|
| - failed_test_names: a list of failed test names.
|
| + assert(isinstance(test, python_test_base.PythonTestBase))
|
| + test.SetUp(self.options)
|
| + results = test.Run()
|
| + test.TearDown()
|
|
|
| - Returns:
|
| - A list of test objects which correspond to test names found in
|
| - failed_test_names, or an empty list if there is no correspondence.
|
| - """
|
| - tests_to_retry = [t for t in available_tests
|
| - if t.qualified_name in failed_test_names]
|
| - return tests_to_retry
|
| + if not results.DidRunPass():
|
| + return results, test
|
| + else:
|
| + return results, None
|
|
|