OLD | NEW |
1 # Copyright 2014 The Chromium Authors. All rights reserved. | 1 # Copyright 2014 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import fnmatch | 5 import fnmatch |
6 import functools | 6 import functools |
7 import imp | 7 import imp |
8 import logging | 8 import logging |
9 | 9 |
10 from devil import base_error | 10 from devil import base_error |
(...skipping 95 matching lines...)
106 tests.add(test) | 106 tests.add(test) |
107 raise | 107 raise |
108 finally: | 108 finally: |
109 if isinstance(tests, test_collection.TestCollection): | 109 if isinstance(tests, test_collection.TestCollection): |
110 tests.test_completed() | 110 tests.test_completed() |
111 | 111 |
112 | 112 |
113 logging.info('Finished running tests on this device.') | 113 logging.info('Finished running tests on this device.') |
114 | 114 |
115 tries = 0 | 115 tries = 0 |
116 results = base_test_result.TestRunResults() | 116 results = [] |
117 all_fail_results = {} | |
118 while tries < self._env.max_tries and tests: | 117 while tries < self._env.max_tries and tests: |
119 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) | 118 logging.info('STARTING TRY #%d/%d', tries + 1, self._env.max_tries) |
120 logging.info('Will run %d tests on %d devices: %s', | 119 logging.info('Will run %d tests on %d devices: %s', |
121 len(tests), len(self._env.devices), | 120 len(tests), len(self._env.devices), |
122 ', '.join(str(d) for d in self._env.devices)) | 121 ', '.join(str(d) for d in self._env.devices)) |
123 for t in tests: | 122 for t in tests: |
124 logging.debug(' %s', t) | 123 logging.debug(' %s', t) |
125 | 124 |
126 try_results = base_test_result.TestRunResults() | 125 try_results = base_test_result.TestRunResults() |
127 if self._ShouldShard(): | 126 if self._ShouldShard(): |
128 tc = test_collection.TestCollection(self._CreateShards(tests)) | 127 tc = test_collection.TestCollection(self._CreateShards(tests)) |
129 self._env.parallel_devices.pMap( | 128 self._env.parallel_devices.pMap( |
130 run_tests_on_device, tc, try_results).pGet(None) | 129 run_tests_on_device, tc, try_results).pGet(None) |
131 else: | 130 else: |
132 self._env.parallel_devices.pMap( | 131 self._env.parallel_devices.pMap( |
133 run_tests_on_device, tests, try_results).pGet(None) | 132 run_tests_on_device, tests, try_results).pGet(None) |
134 | 133 |
135 for result in try_results.GetAll(): | 134 results.append(try_results) |
136 if result.GetType() in (base_test_result.ResultType.PASS, | 135 tries += 1 |
137 base_test_result.ResultType.SKIP): | 136 tests = self._GetTestsToRetry(tests, try_results) |
138 results.AddResult(result) | |
139 else: | |
140 all_fail_results[result.GetName()] = result | |
141 | 137 |
142 results_names = set(r.GetName() for r in results.GetAll()) | |
143 | |
144 def has_test_result(name): | |
145 # When specifying a test filter, names can contain trailing wildcards. | |
146 # See local_device_gtest_run._ExtractTestsFromFilter() | |
147 if name.endswith('*'): | |
148 return any(fnmatch.fnmatch(n, name) for n in results_names) | |
149 return name in results_names | |
150 | |
151 tests = [t for t in tests if not has_test_result(self._GetTestName(t))] | |
152 tries += 1 | |
153 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) | 138 logging.info('FINISHED TRY #%d/%d', tries, self._env.max_tries) |
154 if tests: | 139 if tests: |
155 logging.info('%d failed tests remain.', len(tests)) | 140 logging.info('%d failed tests remain.', len(tests)) |
156 else: | 141 else: |
157 logging.info('All tests completed.') | 142 logging.info('All tests completed.') |
158 | 143 |
159 all_unknown_test_names = set(self._GetTestName(t) for t in tests) | 144 return results |
160 all_failed_test_names = set(all_fail_results.iterkeys()) | |
161 | 145 |
162 unknown_tests = all_unknown_test_names.difference(all_failed_test_names) | 146 def _GetTestsToRetry(self, tests, try_results): |
163 failed_tests = all_failed_test_names.intersection(all_unknown_test_names) | |
164 | 147 |
165 if unknown_tests: | 148 def is_failure(test_result): |
166 results.AddResults( | 149 return ( |
167 base_test_result.BaseTestResult( | 150 test_result is None |
168 u, base_test_result.ResultType.UNKNOWN) | 151 or test_result.GetType() not in ( |
169 for u in unknown_tests) | 152 base_test_result.ResultType.PASS, |
170 if failed_tests: | 153 base_test_result.ResultType.SKIP)) |
171 results.AddResults(all_fail_results[f] for f in failed_tests) | |
172 | 154 |
173 return results | 155 all_test_results = {r.GetName(): r for r in try_results.GetAll()} |
| 156 |
| 157 def should_retry(name): |
| 158 # When specifying a test filter, names can contain trailing wildcards. |
| 159 # See local_device_gtest_run._ExtractTestsFromFilter() |
| 160 if name.endswith('*'): |
| 161 return any(fnmatch.fnmatch(n, name) and is_failure(t) |
| 162 for n, t in all_test_results.iteritems()) |
| 163 return is_failure(all_test_results.get(name)) |
| 164 |
| 165 return [t for t in tests if should_retry(self._GetTestName(t))] |
174 | 166 |
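Aside (illustration only, not part of the diff): the new flow appends one TestRunResults object per try and asks _GetTestsToRetry which tests still need to run. A name ending in '*' is treated as a gtest-style filter pattern, so it is retried only while some matching result is missing or non-passing (the real code also counts SKIP as passing). A minimal sketch of that matching behaviour, using a stand-in result class with the same GetName()/GetType() interface as base_test_result:

import fnmatch

class FakeResult(object):  # stand-in for base_test_result.BaseTestResult
  def __init__(self, name, result_type):
    self._name = name
    self._type = result_type
  def GetName(self):
    return self._name
  def GetType(self):
    return self._type

PASS, FAIL = 'PASS', 'FAIL'  # stand-ins for base_test_result.ResultType values

def tests_to_retry(tests, results):
  # Mirrors _GetTestsToRetry: retry a test if it has no recorded result or a
  # non-passing one; a trailing '*' is matched against every recorded name.
  by_name = {r.GetName(): r for r in results}
  def is_failure(r):
    return r is None or r.GetType() != PASS
  def should_retry(name):
    if name.endswith('*'):
      return any(fnmatch.fnmatch(n, name) and is_failure(r)
                 for n, r in by_name.items())
    return is_failure(by_name.get(name))
  return [t for t in tests if should_retry(t)]

# Prints ['Foo.*']: the wildcard is retried because Foo.B failed; Bar.B passed.
print(tests_to_retry(['Foo.*', 'Bar.B'],
                     [FakeResult('Foo.A', PASS), FakeResult('Foo.B', FAIL),
                      FakeResult('Bar.B', PASS)]))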
175 def GetTool(self, device): | 167 def GetTool(self, device): |
176 if not str(device) in self._tools: | 168 if not str(device) in self._tools: |
177 self._tools[str(device)] = valgrind_tools.CreateTool( | 169 self._tools[str(device)] = valgrind_tools.CreateTool( |
178 self._env.tool, device) | 170 self._env.tool, device) |
179 return self._tools[str(device)] | 171 return self._tools[str(device)] |
180 | 172 |
181 def _CreateShards(self, tests): | 173 def _CreateShards(self, tests): |
182 raise NotImplementedError | 174 raise NotImplementedError |
183 | 175 |
184 # pylint: disable=no-self-use | 176 # pylint: disable=no-self-use |
185 def _GetTestName(self, test): | 177 def _GetTestName(self, test): |
186 return test | 178 return test |
187 | 179 |
188 def _GetTests(self): | 180 def _GetTests(self): |
189 raise NotImplementedError | 181 raise NotImplementedError |
190 | 182 |
191 def _RunTest(self, device, test): | 183 def _RunTest(self, device, test): |
192 raise NotImplementedError | 184 raise NotImplementedError |
193 | 185 |
194 def _ShouldShard(self): | 186 def _ShouldShard(self): |
195 raise NotImplementedError | 187 raise NotImplementedError |
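For context (also not part of the diff), the stubs above are the hooks a concrete runner overrides. A hypothetical minimal subclass might look like the sketch below; it assumes the enclosing class is the LocalDeviceTestRun defined earlier in this file and is written as if inside this module, so base_test_result is already imported. The names are illustrative, not taken from the Chromium source:

class HypotheticalTestRun(LocalDeviceTestRun):
  def _CreateShards(self, tests):
    # One shard per test; a real runner would group tests per device.
    return [[t] for t in tests]

  def _GetTests(self):
    return ['Foo.A', 'Foo.B']

  def _RunTest(self, device, test):
    # Pretend every test passes; a real runner would execute it on |device|
    # and build the result from what actually happened.
    return base_test_result.BaseTestResult(
        test, base_test_result.ResultType.PASS)

  def _ShouldShard(self):
    return True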