| OLD | NEW |
| 1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import fnmatch | 5 import fnmatch |
| 6 import functools | 6 import functools |
| 7 import imp | 7 import imp |
| 8 import logging | 8 import logging |
| 9 import signal | 9 import signal |
| 10 import thread | 10 import thread |
| (...skipping 105 matching lines...) |
| 116 if isinstance(tests, test_collection.TestCollection): | 116 if isinstance(tests, test_collection.TestCollection): |
| 117 tests.add(test) | 117 tests.add(test) |
| 118 raise | 118 raise |
| 119 finally: | 119 finally: |
| 120 if isinstance(tests, test_collection.TestCollection): | 120 if isinstance(tests, test_collection.TestCollection): |
| 121 tests.test_completed() | 121 tests.test_completed() |
| 122 | 122 |
| 123 | 123 |
| 124 logging.info('Finished running tests on this device.') | 124 logging.info('Finished running tests on this device.') |
| 125 | 125 |
| | 126 class TestsTerminated(Exception): |
| | 127 pass |
| | 128 |
| 126 def stop_tests(_signum, _frame): | 129 def stop_tests(_signum, _frame): |
| | 130 logging.critical('Received SIGTERM. Stopping test execution.') |
| 127 exit_now.set() | 131 exit_now.set() |
| | 132 raise TestsTerminated() |
| 128 | 133 |
| 129 with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests): | 134 try: |
| 130 tries = 0 | 135 with signal_handler.AddSignalHandler(signal.SIGTERM, stop_tests): |
| 131 results = [] | 136 tries = 0 |
| 132 while tries < self._env.max_tries and tests: | 137 results = [] |
| 133 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) | 138 while tries < self._env.max_tries and tests: |
| 134 logging.info('Will run %d tests on %d devices: %s', | 139 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) |
| 135 len(tests), len(self._env.devices), | 140 logging.info('Will run %d tests on %d devices: %s', |
| 136 ', '.join(str(d) for d in self._env.devices)) | 141 len(tests), len(self._env.devices), |
| 137 for t in tests: | 142 ', '.join(str(d) for d in self._env.devices)) |
| 138 logging.debug(' %s', t) | 143 for t in tests: |
| | 144 logging.debug(' %s', t) |
| 139 | 145 |
| 140 try_results = base_test_result.TestRunResults() | 146 try_results = base_test_result.TestRunResults() |
| 141 if self._ShouldShard(): | 147 test_names = (self._GetUniqueTestName(t) for t in tests) |
| 142 tc = test_collection.TestCollection(self._CreateShards(tests)) | 148 try_results.AddResults( |
| 143 self._env.parallel_devices.pMap( | 149 base_test_result.BaseTestResult( |
| 144 run_tests_on_device, tc, try_results).pGet(None) | 150 t, base_test_result.ResultType.UNKNOWN) |
| 145 else: | 151 for t in test_names if not t.endswith('*')) |
| 146 self._env.parallel_devices.pMap( | |
| 147 run_tests_on_device, tests, try_results).pGet(None) | |
| 148 | 152 |
| 149 results.append(try_results) | 153 try: |
| 150 tries += 1 | 154 if self._ShouldShard(): |
| 151 tests = self._GetTestsToRetry(tests, try_results) | 155 tc = test_collection.TestCollection(self._CreateShards(tests)) |
| | 156 self._env.parallel_devices.pMap( |
| | 157 run_tests_on_device, tc, try_results).pGet(None) |
| | 158 else: |
| | 159 self._env.parallel_devices.pMap( |
| | 160 run_tests_on_device, tests, try_results).pGet(None) |
| | 161 finally: |
| | 162 results.append(try_results) |
| 152 | 163 |
| 153 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) | 164 tries += 1 |
| 154 if tests: | 165 tests = self._GetTestsToRetry(tests, try_results) |
| 155 logging.info('%d failed tests remain.', len(tests)) | 166 |
| 156 else: | 167 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) |
| 157 logging.info('All tests completed.') | 168 if tests: |
| | 169 logging.info('%d failed tests remain.', len(tests)) |
| | 170 else: |
| | 171 logging.info('All tests completed.') |
| | 172 except TestsTerminated: |
| | 173 pass |
| 158 | 174 |
| 159 return results | 175 return results |
| 160 | 176 |
| 161 def _GetTestsToRetry(self, tests, try_results): | 177 def _GetTestsToRetry(self, tests, try_results): |
| 162 | 178 |
| 163 def is_failure(test_result): | 179 def is_failure(test_result): |
| 164 return ( | 180 return ( |
| 165 test_result is None | 181 test_result is None |
| 166 or test_result.GetType() not in ( | 182 or test_result.GetType() not in ( |
| 167 base_test_result.ResultType.PASS, | 183 base_test_result.ResultType.PASS, |
| (...skipping 25 matching lines...) |
| 193 return test | 209 return test |
| 194 | 210 |
| 195 def _GetTests(self): | 211 def _GetTests(self): |
| 196 raise NotImplementedError | 212 raise NotImplementedError |
| 197 | 213 |
| 198 def _RunTest(self, device, test): | 214 def _RunTest(self, device, test): |
| 199 raise NotImplementedError | 215 raise NotImplementedError |
| 200 | 216 |
| 201 def _ShouldShard(self): | 217 def _ShouldShard(self): |
| 202 raise NotImplementedError | 218 raise NotImplementedError |
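The heart of the new code is the SIGTERM-to-exception flow: the handler sets `exit_now`, raises `TestsTerminated`, the retry loop's `finally` blocks keep whatever partial `try_results` were collected, and unfinished tests are pre-marked `UNKNOWN` so they still show up in the report. The sketch below is a minimal reproduction of that pattern only, assuming the standard `signal` module in place of Chromium's `signal_handler.AddSignalHandler` helper and a stubbed-out test run; `run_tests` and the string result values here are illustrative stand-ins, not the real `local_device_test_run` API.

```python
# Minimal sketch of the patch's control flow (assumed simplification, not the
# actual Chromium runner): SIGTERM raises TestsTerminated, the loop unwinds,
# and `finally` preserves partial results from the interrupted try.
import signal


class TestsTerminated(Exception):
  pass


def stop_tests(_signum, _frame):
  # Raising here unwinds the main thread out of the retry loop below.
  raise TestsTerminated()


def run_tests(tests, max_tries=3):
  results = []
  old_handler = signal.signal(signal.SIGTERM, stop_tests)
  try:
    tries = 0
    while tries < max_tries and tests:
      # Pre-mark every test as UNKNOWN so interrupted tests are still reported.
      try_results = {t: 'UNKNOWN' for t in tests}
      try:
        for t in tests:
          try_results[t] = 'PASS'  # stand-in for actually running the test
      finally:
        # Keep partial results even if SIGTERM interrupted this try.
        results.append(try_results)
      tries += 1
      tests = [t for t in tests if try_results[t] != 'PASS']
  except TestsTerminated:
    pass  # stop retrying, but still return what we have
  finally:
    signal.signal(signal.SIGTERM, old_handler)
  return results
```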