Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import fnmatch | 5 import fnmatch |
| 6 import functools | 6 import functools |
| 7 import imp | 7 import imp |
| 8 import logging | 8 import logging |
| 9 | 9 |
| 10 from devil import base_error | 10 from devil import base_error |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 69 logging.exception('Shard failed: %s(%s)', f.__name__, | 69 logging.exception('Shard failed: %s(%s)', f.__name__, |
| 70 str(dev)) | 70 str(dev)) |
| 71 if on_failure: | 71 if on_failure: |
| 72 on_failure(dev, f.__name__) | 72 on_failure(dev, f.__name__) |
| 73 return None | 73 return None |
| 74 | 74 |
| 75 return wrapper | 75 return wrapper |
| 76 | 76 |
| 77 return decorator | 77 return decorator |
| 78 | 78 |
def find_suspects(unfinished_tests, shards):
  """Returns the first unfinished test of each shard.

  The first unfinished test in each shard is a suspicious testcase that may
  have crashed and have caused all the following testcases in that shard not
  to run.

  Args:
    unfinished_tests: A collection of test names that did not finish running.
    shards: An iterable of shards, each an ordered sequence of test names.

  Returns:
    A list containing at most one suspect test name per shard.
  """
  suspects = []
  for shard in shards:
    for test in shard:
      if test in unfinished_tests:
        # Use append, not +=: 'suspects += test' would extend the list with
        # the individual characters of the test-name string rather than
        # adding the name itself as one element.
        suspects.append(test)
        break
  return suspects
| 79 | 90 |
| 80 class LocalDeviceTestRun(test_run.TestRun): | 91 class LocalDeviceTestRun(test_run.TestRun): |
| 81 | 92 |
  def __init__(self, env, test_instance):
    """Initializes the local-device test run.

    Args:
      env: The environment providing the devices to run tests on.
      test_instance: The test instance describing what to run.
    """
    super(LocalDeviceTestRun, self).__init__(env, test_instance)
    # Cache of tool instances; populated lazily elsewhere in this class
    # (presumably keyed per device — confirm against GetTool).
    self._tools = {}
| 85 | 96 |
| 86 #override | 97 #override |
| 87 def RunTests(self): | 98 def RunTests(self): |
| 88 tests = self._GetTests() | 99 tests = self._GetTests() |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 107 finally: | 118 finally: |
| 108 if isinstance(tests, test_collection.TestCollection): | 119 if isinstance(tests, test_collection.TestCollection): |
| 109 tests.test_completed() | 120 tests.test_completed() |
| 110 | 121 |
| 111 | 122 |
| 112 logging.info('Finished running tests on this device.') | 123 logging.info('Finished running tests on this device.') |
| 113 | 124 |
| 114 tries = 0 | 125 tries = 0 |
| 115 results = base_test_result.TestRunResults() | 126 results = base_test_result.TestRunResults() |
| 116 all_fail_results = {} | 127 all_fail_results = {} |
| 128 suspects = [] | |
| 117 while tries < self._env.max_tries and tests: | 129 while tries < self._env.max_tries and tests: |
| 118 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) | 130 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) |
| 119 logging.info('Will run %d tests on %d devices: %s', | 131 logging.info('Will run %d tests on %d devices: %s', |
| 120 len(tests), len(self._env.devices), | 132 len(tests), len(self._env.devices), |
| 121 ', '.join(str(d) for d in self._env.devices)) | 133 ', '.join(str(d) for d in self._env.devices)) |
| 122 for t in tests: | 134 for t in tests: |
| 123 logging.debug(' %s', t) | 135 logging.debug(' %s', t) |
| 124 | 136 |
| 125 try_results = base_test_result.TestRunResults() | 137 try_results = base_test_result.TestRunResults() |
| 126 if self._ShouldShard(): | 138 if self._ShouldShard(): |
| 127 tc = test_collection.TestCollection(self._CreateShards(tests)) | 139 shards = self._CreateShards(tests, suspects) |
|
jbudorick
2016/02/18 23:31:56
Shard creation is going to have to be result-aware
| |
| 140 tc = test_collection.TestCollection(shards) | |
| 128 self._env.parallel_devices.pMap( | 141 self._env.parallel_devices.pMap( |
| 129 run_tests_on_device, tc, try_results).pGet(None) | 142 run_tests_on_device, tc, try_results).pGet(None) |
| 130 else: | 143 else: |
| 131 self._env.parallel_devices.pMap( | 144 self._env.parallel_devices.pMap( |
| 132 run_tests_on_device, tests, try_results).pGet(None) | 145 run_tests_on_device, tests, try_results).pGet(None) |
| 133 | 146 |
| 134 for result in try_results.GetAll(): | 147 for result in try_results.GetAll(): |
| 135 if result.GetType() in (base_test_result.ResultType.PASS, | 148 if result.GetType() in (base_test_result.ResultType.PASS, |
| 136 base_test_result.ResultType.SKIP): | 149 base_test_result.ResultType.SKIP): |
| 137 results.AddResult(result) | 150 results.AddResult(result) |
| 138 else: | 151 else: |
| 139 all_fail_results[result.GetName()] = result | 152 all_fail_results[result.GetName()] = result |
| 140 | 153 |
| 141 results_names = set(r.GetName() for r in results.GetAll()) | 154 results_names = set(r.GetName() for r in results.GetAll()) |
| 142 | 155 |
| 143 def has_test_result(name): | 156 def has_test_result(name): |
| 144 # When specifying a test filter, names can contain trailing wildcards. | 157 # When specifying a test filter, names can contain trailing wildcards. |
| 145 # See local_device_gtest_run._ExtractTestsFromFilter() | 158 # See local_device_gtest_run._ExtractTestsFromFilter() |
| 146 if name.endswith('*'): | 159 if name.endswith('*'): |
| 147 return any(fnmatch.fnmatch(n, name) for n in results_names) | 160 return any(fnmatch.fnmatch(n, name) for n in results_names) |
| 148 return name in results_names | 161 return name in results_names |
| 149 | |
| 150 tests = [t for t in tests if not has_test_result(self._GetTestName(t))] | 162 tests = [t for t in tests if not has_test_result(self._GetTestName(t))] |
| 163 suspects = find_suspects(tests, shards) | |
| 151 tries += 1 | 164 tries += 1 |
| 152 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) | 165 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) |
| 153 if tests: | 166 if tests: |
| 154 logging.info('%d failed tests remain.', len(tests)) | 167 logging.info('%d failed tests remain.', len(tests)) |
| 155 else: | 168 else: |
| 156 logging.info('All tests completed.') | 169 logging.info('All tests completed.') |
| 157 | 170 |
| 158 all_unknown_test_names = set(self._GetTestName(t) for t in tests) | 171 all_unknown_test_names = set(self._GetTestName(t) for t in tests) |
| 159 all_failed_test_names = set(all_fail_results.iterkeys()) | 172 all_failed_test_names = set(all_fail_results.iterkeys()) |
| 160 | 173 |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 185 return test | 198 return test |
| 186 | 199 |
  def _GetTests(self):
    """Returns the list of tests to run. Must be implemented by subclasses."""
    raise NotImplementedError
| 189 | 202 |
  def _RunTest(self, device, test):
    """Runs the given test on the given device. Must be implemented by
    subclasses."""
    raise NotImplementedError
| 192 | 205 |
  def _ShouldShard(self):
    """Returns whether tests should be sharded across parallel devices.
    Must be implemented by subclasses."""
    raise NotImplementedError
| OLD | NEW |