# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import logging

from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.base import test_collection


class LocalDeviceTestRun(test_run.TestRun):

  def __init__(self, env, test_instance):
    super(LocalDeviceTestRun, self).__init__(env, test_instance)
    self._tools = {}

  #override
  def RunTests(self):
    tests = self._GetTests()

    def run_tests_on_device(dev, tests):
      # Runs each test handed to this device and collects its results.
      r = base_test_result.TestRunResults()
      for test in tests:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          r.AddResult(result)
        elif isinstance(result, list):
          r.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' % type(result).__name__)
        if isinstance(tests, test_collection.TestCollection):
          tests.test_completed()
      return r

    tries = 0
    results = base_test_result.TestRunResults()
    all_fail_results = {}
    # Keep rerunning the tests that have not yet passed (or been skipped)
    # until they all have, or until max_tries attempts have been made.
    while tries < self._env.max_tries and tests:
      logging.debug('try %d, will run %d tests:', tries, len(tests))
      for t in tests:
        logging.debug(' %s', t)

      if self._ShouldShard():
        tc = test_collection.TestCollection(self._CreateShards(tests))
        try_results = self._env.parallel_devices.pMap(
            run_tests_on_device, tc).pGet(None)
      else:
        try_results = self._env.parallel_devices.pMap(
            run_tests_on_device, tests).pGet(None)
      for try_result in try_results:
        for result in try_result.GetAll():
          if result.GetType() in (base_test_result.ResultType.PASS,
                                  base_test_result.ResultType.SKIP):
            results.AddResult(result)
          else:
            all_fail_results[result.GetName()] = result

      results_names = set(r.GetName() for r in results.GetAll())
      tests = [t for t in tests if self._GetTestName(t) not in results_names]
      tries += 1

    # Tests still in |tests| never passed or were skipped: report the most
    # recently recorded failure if there is one, otherwise mark them UNKNOWN.
    all_unknown_test_names = set(self._GetTestName(t) for t in tests)
    all_failed_test_names = set(all_fail_results.iterkeys())

    unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
    failed_tests = all_failed_test_names.intersection(all_unknown_test_names)

    if unknown_tests:
      results.AddResults(
          base_test_result.BaseTestResult(
              u, base_test_result.ResultType.UNKNOWN)
          for u in unknown_tests)
    if failed_tests:
      results.AddResults(all_fail_results[f] for f in failed_tests)

    return results

  def GetTool(self, device):
    # Lazily creates and caches one tool wrapper per device.
    if not str(device) in self._tools:
      self._tools[str(device)] = valgrind_tools.CreateTool(
          self._env.tool, device)
    return self._tools[str(device)]

  def _CreateShards(self, tests):
    raise NotImplementedError

  def _GetTestName(self, test):
    return test

  def _GetTests(self):
    raise NotImplementedError

  def _RunTest(self, device, test):
    raise NotImplementedError

  def _ShouldShard(self):
    raise NotImplementedError
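

# Illustrative sketch (not part of the upstream file): one way a concrete
# subclass could plug into RunTests above. The subclass name and the
# hard-coded test list are invented for the example; only the overridden
# hooks and the BaseTestResult call mirror what RunTests actually expects.

class ExampleLocalDeviceTestRun(LocalDeviceTestRun):

  #override
  def _GetTests(self):
    # A real implementation would derive this list from the test_instance.
    return ['ExampleSuite.TestA', 'ExampleSuite.TestB']

  #override
  def _RunTest(self, device, test):
    # A real implementation would execute |test| on |device| and inspect its
    # output. RunTests accepts either a single BaseTestResult or a list of
    # them; any result that is not PASS or SKIP is retried on a later try.
    return base_test_result.BaseTestResult(
        test, base_test_result.ResultType.PASS)

  #override
  def _ShouldShard(self):
    # With sharding disabled, every device receives the full test list and
    # _CreateShards is never called.
    return False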