OLD | NEW |
1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
2 # | 2 # |
3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
5 # met: | 5 # met: |
6 # | 6 # |
7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
(...skipping 32 matching lines...)
43 _log = logging.getLogger(__name__) | 43 _log = logging.getLogger(__name__) |
44 | 44 |
45 | 45 |
46 TestExpectations = test_expectations.TestExpectations | 46 TestExpectations = test_expectations.TestExpectations |
47 | 47 |
48 # Export this so callers don't need to know about message pools. | 48 # Export this so callers don't need to know about message pools. |
49 WorkerException = message_pool.WorkerException | 49 WorkerException = message_pool.WorkerException |
50 | 50 |
51 | 51 |
52 class TestRunInterruptedException(Exception): | 52 class TestRunInterruptedException(Exception): |
| 53 |
53 """Raised when a test run should be stopped immediately.""" | 54 """Raised when a test run should be stopped immediately.""" |
| 55 |
54 def __init__(self, reason): | 56 def __init__(self, reason): |
55 Exception.__init__(self) | 57 Exception.__init__(self) |
56 self.reason = reason | 58 self.reason = reason |
57 self.msg = reason | 59 self.msg = reason |
58 | 60 |
59 def __reduce__(self): | 61 def __reduce__(self): |
60 return self.__class__, (self.reason,) | 62 return self.__class__, (self.reason,) |
61 | 63 |
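A note on the __reduce__ override above: results and exceptions cross process boundaries through the message pool by pickling, and a custom exception whose __init__ requires an argument does not unpickle by default. A minimal standalone sketch of the failure and the fix (hypothetical class names, not part of the change under review):

    import pickle

    class NoReduce(Exception):
        def __init__(self, reason):
            Exception.__init__(self)  # leaves self.args == (), so pickle replays NoReduce()
            self.reason = reason

    class WithReduce(NoReduce):
        def __reduce__(self):
            # Tell pickle to rebuild the exception as self.__class__(self.reason).
            return self.__class__, (self.reason,)

    pickle.loads(pickle.dumps(WithReduce('shard failed')))  # round-trips fine
    try:
        pickle.loads(pickle.dumps(NoReduce('shard failed')))
    except TypeError:
        pass  # unpickling calls NoReduce() with no arguments and raises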
62 | 64 |
63 class LayoutTestRunner(object): | 65 class LayoutTestRunner(object): |
| 66 |
64 def __init__(self, options, port, printer, results_directory, test_is_slow_fn): | 67 def __init__(self, options, port, printer, results_directory, test_is_slow_fn): |
65 self._options = options | 68 self._options = options |
66 self._port = port | 69 self._port = port |
67 self._printer = printer | 70 self._printer = printer |
68 self._results_directory = results_directory | 71 self._results_directory = results_directory |
69 self._test_is_slow = test_is_slow_fn | 72 self._test_is_slow = test_is_slow_fn |
70 self._sharder = Sharder(self._port.split_test, self._options.max_locked_shards) | 73 self._sharder = Sharder(self._port.split_test, self._options.max_locked_shards) |
71 self._filesystem = self._port.host.filesystem | 74 self._filesystem = self._port.host.filesystem |
72 | 75 |
73 self._expectations = None | 76 self._expectations = None |
(...skipping 17 matching lines...)
91 if not retrying: | 94 if not retrying: |
92 self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type) | 95 self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type) |
93 | 96 |
94 for test_name in set(tests_to_skip): | 97 for test_name in set(tests_to_skip): |
95 result = test_results.TestResult(test_name) | 98 result = test_results.TestResult(test_name) |
96 result.type = test_expectations.SKIP | 99 result.type = test_expectations.SKIP |
97 run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name)) | 100 run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name)) |
98 | 101 |
99 self._printer.write_update('Sharding tests ...') | 102 self._printer.write_update('Sharding tests ...') |
100 locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, | 103 locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, |
101 int(self._options.child_processes), self._options.fully_parallel, | 104 int(self._options.child_processes), self._options.fully_parallel, |
102 self._options.run_singly or (self._options.batch_size == 1)) | 105 self._options.run_singly or (self._options.batch_size == 1)) |
103 | 106 |
104 # We don't have a good way to coordinate the workers so that they don't | 107 # We don't have a good way to coordinate the workers so that they don't |
105 # try to run the shards that need a lock. The easiest solution is to | 108 # try to run the shards that need a lock. The easiest solution is to |
106 # run all of the locked shards first. | 109 # run all of the locked shards first. |
107 all_shards = locked_shards + unlocked_shards | 110 all_shards = locked_shards + unlocked_shards |
108 num_workers = min(num_workers, len(all_shards)) | 111 num_workers = min(num_workers, len(all_shards)) |
109 self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards)) | 112 self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards)) |
110 | 113 |
111 if self._options.dry_run: | 114 if self._options.dry_run: |
112 return run_results | 115 return run_results |
113 | 116 |
114 self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers)) | 117 self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers)) |
115 | 118 |
116 start_time = time.time() | 119 start_time = time.time() |
117 try: | 120 try: |
118 with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool: | 121 with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool: |
119 pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards) | 122 pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards) |
120 | 123 |
121 if self._shards_to_redo: | 124 if self._shards_to_redo: |
122 num_workers -= len(self._shards_to_redo) | 125 num_workers -= len(self._shards_to_redo) |
123 if num_workers > 0: | 126 if num_workers > 0: |
124 with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool: | 127 with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool: |
125 pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo) | 128 pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo) |
126 except TestRunInterruptedException, e: | 129 except TestRunInterruptedException as e: |
127 _log.warning(e.reason) | 130 _log.warning(e.reason) |
128 run_results.interrupted = True | 131 run_results.interrupted = True |
129 except KeyboardInterrupt: | 132 except KeyboardInterrupt: |
130 self._printer.flush() | 133 self._printer.flush() |
131 self._printer.writeln('Interrupted, exiting ...') | 134 self._printer.writeln('Interrupted, exiting ...') |
132 run_results.keyboard_interrupted = True | 135 run_results.keyboard_interrupted = True |
133 except Exception, e: | 136 except Exception as e: |
134 _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e))) | 137 _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e))) |
135 raise | 138 raise |
136 finally: | 139 finally: |
137 run_results.run_time = time.time() - start_time | 140 run_results.run_time = time.time() - start_time |
138 | 141 |
139 return run_results | 142 return run_results |
140 | 143 |
141 def _worker_factory(self, worker_connection): | 144 def _worker_factory(self, worker_connection): |
142 results_directory = self._results_directory | 145 results_directory = self._results_directory |
143 if self._retrying: | 146 if self._retrying: |
144 self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries')) | 147 self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries')) |
145 results_directory = self._filesystem.join(self._results_directory, 'retries') | 148 results_directory = self._filesystem.join(self._results_directory, 'retries') |
146 return Worker(worker_connection, results_directory, self._options) | 149 return Worker(worker_connection, results_directory, self._options) |
147 | 150 |
148 def _mark_interrupted_tests_as_skipped(self, run_results): | 151 def _mark_interrupted_tests_as_skipped(self, run_results): |
149 for test_input in self._test_inputs: | 152 for test_input in self._test_inputs: |
150 if test_input.test_name not in run_results.results_by_name: | 153 if test_input.test_name not in run_results.results_by_name: |
151 result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()]) | 154 result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()]) |
152 # FIXME: We probably need to loop here if there are multiple iterations. | 155 # FIXME: We probably need to loop here if there are multiple iterations. |
153 # FIXME: Also, these results are really neither expected nor unexpected. We probably | 156 # FIXME: Also, these results are really neither expected nor unexpected. We probably |
154 # need a third type of result. | 157 # need a third type of result. |
155 run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name)) | 158 run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name)) |
156 | 159 |
157 def _interrupt_if_at_failure_limits(self, run_results): | 160 def _interrupt_if_at_failure_limits(self, run_results): |
158 # Note: The messages in this method are constructed to match old-run-webkit-tests | 161 # Note: The messages in this method are constructed to match old-run-webkit-tests |
159 # so that existing buildbot grep rules work. | 162 # so that existing buildbot grep rules work. |
160 def interrupt_if_at_failure_limit(limit, failure_count, run_results, message): | 163 def interrupt_if_at_failure_limit(limit, failure_count, run_results, message): |
161 if limit and failure_count >= limit: | 164 if limit and failure_count >= limit: |
162 message += " %d tests run." % (run_results.expected + run_results.unexpected) | 165 message += ' %d tests run.' % (run_results.expected + run_results.unexpected) |
163 self._mark_interrupted_tests_as_skipped(run_results) | 166 self._mark_interrupted_tests_as_skipped(run_results) |
164 raise TestRunInterruptedException(message) | 167 raise TestRunInterruptedException(message) |
165 | 168 |
166 interrupt_if_at_failure_limit( | 169 interrupt_if_at_failure_limit( |
167 self._options.exit_after_n_failures, | 170 self._options.exit_after_n_failures, |
168 run_results.unexpected_failures, | 171 run_results.unexpected_failures, |
169 run_results, | 172 run_results, |
170 "Exiting early after %d failures." % run_results.unexpected_failures) | 173 'Exiting early after %d failures.' % run_results.unexpected_failures) |
171 interrupt_if_at_failure_limit( | 174 interrupt_if_at_failure_limit( |
172 self._options.exit_after_n_crashes_or_timeouts, | 175 self._options.exit_after_n_crashes_or_timeouts, |
173 run_results.unexpected_crashes + run_results.unexpected_timeouts, | 176 run_results.unexpected_crashes + run_results.unexpected_timeouts, |
174 run_results, | 177 run_results, |
175 # This differs from ORWT because it does not include WebProcess crashes. | 178 # This differs from ORWT because it does not include WebProcess crashes. |
176 "Exiting early after %d crashes and %d timeouts." % (run_results.unexpected_crashes, run_results.unexpected_timeouts)) | 179 'Exiting early after %d crashes and %d timeouts.' % (run_results.unexpected_crashes, run_results.unexpected_timeouts)) |
177 | 180 |
178 def _update_summary_with_result(self, run_results, result): | 181 def _update_summary_with_result(self, run_results, result): |
179 expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.reftest_type, self._options.enable_sanitizer) | 182 expected = self._expectations.matches_an_expected_result( |
 | 183 result.test_name, |
 | 184 result.type, |
 | 185 self._options.pixel_tests or result.reftest_type, |
 | 186 self._options.enable_sanitizer) |
180 exp_str = self._expectations.get_expectations_string(result.test_name) | 187 exp_str = self._expectations.get_expectations_string(result.test_name) |
181 got_str = self._expectations.expectation_to_string(result.type) | 188 got_str = self._expectations.expectation_to_string(result.type) |
182 | 189 |
183 if result.device_failed: | 190 if result.device_failed: |
184 self._printer.print_finished_test(result, False, exp_str, "Aborted") | 191 self._printer.print_finished_test(result, False, exp_str, 'Aborted') |
185 return | 192 return |
186 | 193 |
187 run_results.add(result, expected, self._test_is_slow(result.test_name)) | 194 run_results.add(result, expected, self._test_is_slow(result.test_name)) |
188 self._printer.print_finished_test(result, expected, exp_str, got_str) | 195 self._printer.print_finished_test(result, expected, exp_str, got_str) |
189 self._interrupt_if_at_failure_limits(run_results) | 196 self._interrupt_if_at_failure_limits(run_results) |
190 | 197 |
191 def handle(self, name, source, *args): | 198 def handle(self, name, source, *args): |
192 method = getattr(self, '_handle_' + name) | 199 method = getattr(self, '_handle_' + name) |
193 if method: | 200 if method: |
194 return method(source, *args) | 201 return method(source, *args) |
195 raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args))) | 202 raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args))) |
196 | 203 |
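handle() above dispatches a message named 'foo' to a method named '_handle_foo'. A self-contained sketch of the same convention (hypothetical Dispatcher and _handle_ping names; this variant passes a default to getattr, since without one an unknown name raises AttributeError before the AssertionError is reached):

    class Dispatcher(object):
        def _handle_ping(self, source):
            return 'pong to %s' % source

        def handle(self, name, source, *args):
            method = getattr(self, '_handle_' + name, None)
            if method:
                return method(source, *args)
            raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))

    print(Dispatcher().handle('ping', 'worker/0'))  # pong to worker/0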
197 def _handle_started_test(self, worker_name, test_input, test_timeout_sec): | 204 def _handle_started_test(self, worker_name, test_input, test_timeout_sec): |
198 self._printer.print_started_test(test_input.test_name) | 205 self._printer.print_started_test(test_input.test_name) |
199 | 206 |
200 def _handle_finished_test_list(self, worker_name, list_name): | 207 def _handle_finished_test_list(self, worker_name, list_name): |
201 pass | 208 pass |
202 | 209 |
203 def _handle_finished_test(self, worker_name, result, log_messages=[]): | 210 def _handle_finished_test(self, worker_name, result, log_messages=[]): |
204 self._update_summary_with_result(self._current_run_results, result) | 211 self._update_summary_with_result(self._current_run_results, result) |
205 | 212 |
206 def _handle_device_failed(self, worker_name, list_name, remaining_tests): | 213 def _handle_device_failed(self, worker_name, list_name, remaining_tests): |
207 _log.warning("%s has failed" % worker_name) | 214 _log.warning('%s has failed' % worker_name) |
208 if remaining_tests: | 215 if remaining_tests: |
209 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) | 216 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) |
210 | 217 |
| 218 |
211 class Worker(object): | 219 class Worker(object): |
| 220 |
212 def __init__(self, caller, results_directory, options): | 221 def __init__(self, caller, results_directory, options): |
213 self._caller = caller | 222 self._caller = caller |
214 self._worker_number = caller.worker_number | 223 self._worker_number = caller.worker_number |
215 self._name = caller.name | 224 self._name = caller.name |
216 self._results_directory = results_directory | 225 self._results_directory = results_directory |
217 self._options = options | 226 self._options = options |
218 | 227 |
219 # The remaining fields are initialized in start() | 228 # The remaining fields are initialized in start() |
220 self._host = None | 229 self._host = None |
221 self._port = None | 230 self._port = None |
(...skipping 56 matching lines...)
278 self._driver = self._port.create_driver(self._worker_number) | 287 self._driver = self._port.create_driver(self._worker_number) |
279 | 288 |
280 if not self._driver: | 289 if not self._driver: |
281 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create | 290 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create |
282 # a new failure type? | 291 # a new failure type? |
283 device_failed = True | 292 device_failed = True |
284 return device_failed | 293 return device_failed |
285 | 294 |
286 self._caller.post('started_test', test_input, test_timeout_sec) | 295 self._caller.post('started_test', test_input, test_timeout_sec) |
287 result = single_test_runner.run_single_test(self._port, self._options, self._results_directory, | 296 result = single_test_runner.run_single_test(self._port, self._options, self._results_directory, |
288 self._name, self._driver, test_input, stop_when_done) | 297 self._name, self._driver, test_input, stop_when_done) |
289 | 298 |
290 result.shard_name = shard_name | 299 result.shard_name = shard_name |
291 result.worker_name = self._name | 300 result.worker_name = self._name |
292 result.total_run_time = time.time() - start | 301 result.total_run_time = time.time() - start |
293 result.test_number = self._num_tests | 302 result.test_number = self._num_tests |
294 self._num_tests += 1 | 303 self._num_tests += 1 |
295 self._caller.post('finished_test', result) | 304 self._caller.post('finished_test', result) |
296 self._clean_up_after_test(test_input, result) | 305 self._clean_up_after_test(test_input, result) |
297 return result.device_failed | 306 return result.device_failed |
298 | 307 |
299 def stop(self): | 308 def stop(self): |
300 _log.debug("%s cleaning up" % self._name) | 309 _log.debug('%s cleaning up' % self._name) |
301 self._kill_driver() | 310 self._kill_driver() |
302 | 311 |
303 def _timeout(self, test_input): | 312 def _timeout(self, test_input): |
304 """Compute the appropriate timeout value for a test.""" | 313 """Compute the appropriate timeout value for a test.""" |
305 # The driver watchdog uses 2.5x the timeout; we want to be | 314 # The driver watchdog uses 2.5x the timeout; we want to be |
306 # larger than that. We also add a little more padding if we're | 315 # larger than that. We also add a little more padding if we're |
307 # running tests in a separate thread. | 316 # running tests in a separate thread. |
308 # | 317 # |
309 # Note that we need to convert the test timeout from a | 318 # Note that we need to convert the test timeout from a |
310 # string value in milliseconds to a float for Python. | 319 # string value in milliseconds to a float for Python. |
311 | 320 |
312 # FIXME: Can we just return the test_input.timeout now? | 321 # FIXME: Can we just return the test_input.timeout now? |
313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 | 322 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 |
314 | 323 |
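A worked instance of the padding arithmetic in _timeout() above, assuming a hypothetical 6000 ms per-test timeout: the driver watchdog fires at 2.5x (15 s), so the 3.0x factor yields 18 s and safely outlasts it.

    test_timeout_ms = '6000'  # hypothetical value; per the comment, timeouts arrive as strings in milliseconds
    watchdog_sec = 2.5 * float(test_timeout_ms) / 1000.0        # 15.0, when the driver watchdog gives up
    driver_timeout_sec = 3.0 * float(test_timeout_ms) / 1000.0  # 18.0, what _timeout computes
    assert driver_timeout_sec > watchdog_sec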
315 def _kill_driver(self): | 324 def _kill_driver(self): |
316 # Be careful about how and when we kill the driver; if driver.stop() | 325 # Be careful about how and when we kill the driver; if driver.stop() |
317 # raises an exception, this routine may get re-entered via __del__. | 326 # raises an exception, this routine may get re-entered via __del__. |
318 driver = self._driver | 327 driver = self._driver |
319 self._driver = None | 328 self._driver = None |
320 if driver: | 329 if driver: |
321 _log.debug("%s killing driver" % self._name) | 330 _log.debug('%s killing driver' % self._name) |
322 driver.stop() | 331 driver.stop() |
323 | 332 |
324 | |
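The swap-before-stop pattern in _kill_driver() above is what makes re-entry safe: self._driver is cleared before stop() can raise, so a second call arriving via __del__ during unwinding finds None and does nothing. The same idiom in isolation (hypothetical Holder name):

    class Holder(object):
        def __init__(self, resource):
            self.resource = resource

        def shutdown(self):
            # Take ownership before calling anything that can raise; a
            # re-entrant call then sees None and returns immediately.
            resource = self.resource
            self.resource = None
            if resource:
                resource.stop()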
325 def _clean_up_after_test(self, test_input, result): | 333 def _clean_up_after_test(self, test_input, result): |
326 test_name = test_input.test_name | 334 test_name = test_input.test_name |
327 | 335 |
328 if result.failures: | 336 if result.failures: |
329 # Check and kill the driver if we need to. | 337 # Check and kill the driver if we need to. |
330 if any([f.driver_needs_restart() for f in result.failures]): | 338 if any([f.driver_needs_restart() for f in result.failures]): |
331 self._kill_driver() | 339 self._kill_driver() |
332 # Reset the batch count since the shell just bounced. | 340 # Reset the batch count since the shell just bounced. |
333 self._batch_count = 0 | 341 self._batch_count = 0 |
334 | 342 |
335 # Print the error message(s). | 343 # Print the error message(s). |
336 _log.debug("%s %s failed:" % (self._name, test_name)) | 344 _log.debug('%s %s failed:' % (self._name, test_name)) |
337 for f in result.failures: | 345 for f in result.failures: |
338 _log.debug("%s %s" % (self._name, f.message())) | 346 _log.debug('%s %s' % (self._name, f.message())) |
339 elif result.type == test_expectations.SKIP: | 347 elif result.type == test_expectations.SKIP: |
340 _log.debug("%s %s skipped" % (self._name, test_name)) | 348 _log.debug('%s %s skipped' % (self._name, test_name)) |
341 else: | 349 else: |
342 _log.debug("%s %s passed" % (self._name, test_name)) | 350 _log.debug('%s %s passed' % (self._name, test_name)) |
343 | 351 |
344 | 352 |
345 class TestShard(object): | 353 class TestShard(object): |
| 354 |
346 """A test shard is a named list of TestInputs.""" | 355 """A test shard is a named list of TestInputs.""" |
347 | 356 |
348 def __init__(self, name, test_inputs): | 357 def __init__(self, name, test_inputs): |
349 self.name = name | 358 self.name = name |
350 self.test_inputs = test_inputs | 359 self.test_inputs = test_inputs |
351 self.requires_lock = test_inputs[0].requires_lock | 360 self.requires_lock = test_inputs[0].requires_lock |
352 | 361 |
353 def __repr__(self): | 362 def __repr__(self): |
354 return "TestShard(name='%s', test_inputs=%s, requires_lock=%s'" % (self.name, self.test_inputs, self.requires_lock) | 363 return "TestShard(name='%s', test_inputs=%s, requires_lock=%s'" % (self.name, self.test_inputs, self.requires_lock) |
355 | 364 |
356 def __eq__(self, other): | 365 def __eq__(self, other): |
357 return self.name == other.name and self.test_inputs == other.test_inputs | 366 return self.name == other.name and self.test_inputs == other.test_inputs |
358 | 367 |
359 | 368 |
360 class Sharder(object): | 369 class Sharder(object): |
| 370 |
361 def __init__(self, test_split_fn, max_locked_shards): | 371 def __init__(self, test_split_fn, max_locked_shards): |
362 self._split = test_split_fn | 372 self._split = test_split_fn |
363 self._max_locked_shards = max_locked_shards | 373 self._max_locked_shards = max_locked_shards |
364 | 374 |
365 def shard_tests(self, test_inputs, num_workers, fully_parallel, run_singly): | 375 def shard_tests(self, test_inputs, num_workers, fully_parallel, run_singly): |
366 """Groups tests into batches. | 376 """Groups tests into batches. |
367 This helps ensure that tests that depend on each other (aka bad tests!) | 377 This helps ensure that tests that depend on each other (aka bad tests!) |
368 continue to run together as most cross-tests dependencies tend to | 378 continue to run together as most cross-tests dependencies tend to |
369 occur within the same directory. | 379 occur within the same directory. |
370 Return: | 380 Return: |
(...skipping 51 matching lines...)
422 # of content_shell, it's too slow to shard them fully. | 432 # of content_shell, it's too slow to shard them fully. |
423 virtual_inputs.append(test_input) | 433 virtual_inputs.append(test_input) |
424 else: | 434 else: |
425 unlocked_shards.append(TestShard('.', [test_input])) | 435 unlocked_shards.append(TestShard('.', [test_input])) |
426 | 436 |
427 locked_virtual_shards, unlocked_virtual_shards = self._shard_by_directory(virtual_inputs) | 437 locked_virtual_shards, unlocked_virtual_shards = self._shard_by_directory(virtual_inputs) |
428 | 438 |
429 # The locked shards still need to be limited to self._max_locked_shards in order to not | 439 # The locked shards still need to be limited to self._max_locked_shards in order to not |
430 # overload the http server for the http tests. | 440 # overload the http server for the http tests. |
431 return (self._resize_shards(locked_virtual_shards + locked_shards, self._max_locked_shards, 'locked_shard'), | 441 return (self._resize_shards(locked_virtual_shards + locked_shards, self._max_locked_shards, 'locked_shard'), |
432 unlocked_virtual_shards + unlocked_shards) | 442 unlocked_virtual_shards + unlocked_shards) |
433 | 443 |
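_shard_by_directory below batches inputs by directory so that tests with intra-directory dependencies stay on one worker. A standalone sketch of that grouping, using plain path strings instead of TestInput objects (hypothetical helper, not the method under review):

    def group_by_directory(test_names):
        # 'fast/dom/a.html' and 'fast/dom/b.html' share the shard 'fast/dom'.
        tests_by_dir = {}
        for name in test_names:
            directory = name.rsplit('/', 1)[0] if '/' in name else '.'
            tests_by_dir.setdefault(directory, []).append(name)
        return tests_by_dir

    shards = group_by_directory(['fast/dom/a.html', 'fast/dom/b.html', 'http/tests/c.html'])
    # {'fast/dom': ['fast/dom/a.html', 'fast/dom/b.html'], 'http/tests': ['http/tests/c.html']}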
434 def _shard_by_directory(self, test_inputs): | 444 def _shard_by_directory(self, test_inputs): |
435 """Returns two lists of shards, each shard containing all the files in a directory. | 445 """Returns two lists of shards, each shard containing all the files in a directory. |
436 | 446 |
437 This is the default mode, and gets as much parallelism as we can while | 447 This is the default mode, and gets as much parallelism as we can while |
438 minimizing flakiness caused by inter-test dependencies.""" | 448 minimizing flakiness caused by inter-test dependencies.""" |
439 locked_shards = [] | 449 locked_shards = [] |
440 unlocked_shards = [] | 450 unlocked_shards = [] |
441 unlocked_slow_shards = [] | 451 unlocked_slow_shards = [] |
442 tests_by_dir = {} | 452 tests_by_dir = {} |
(...skipping 54 matching lines...)
497 def split_at(seq, index): | 507 def split_at(seq, index): |
498 return (seq[:index], seq[index:]) | 508 return (seq[:index], seq[index:]) |
499 | 509 |
500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) | 510 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) |
501 new_shards = [] | 511 new_shards = [] |
502 remaining_shards = old_shards | 512 remaining_shards = old_shards |
503 while remaining_shards: | 513 while remaining_shards: |
504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) | 514 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) |
505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) | 515 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) |
506 return new_shards | 516 return new_shards |
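A worked instance of the resizing loop above. divide_and_round_up is defined in a region elided from this diff; ceiling division is assumed here as a plausible reading. Squeezing 10 old shards into at most 4 new ones takes them 3 at a time, giving groups of 3, 3, 3 and 1:

    def divide_and_round_up(numerator, denominator):
        # Assumed implementation: ceiling division, e.g. (10, 4) -> 3.
        return (numerator + denominator - 1) // denominator

    def split_at(seq, index):
        return (seq[:index], seq[index:])

    old_shards = ['shard_%d' % i for i in range(10)]
    num_old_per_new = divide_and_round_up(len(old_shards), 4)  # 3
    groups, remaining = [], old_shards
    while remaining:
        some_shards, remaining = split_at(remaining, num_old_per_new)
        groups.append(some_shards)
    print([len(g) for g in groups])  # [3, 3, 3, 1]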