Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 28 | 28 |
| 29 import logging | 29 import logging |
| 30 import math | 30 import math |
| 31 import threading | |
| 32 import time | 31 import time |
| 33 | 32 |
| 34 from webkitpy.common import message_pool | 33 from webkitpy.common import message_pool |
| 35 from webkitpy.layout_tests.controllers import single_test_runner | 34 from webkitpy.layout_tests.controllers import single_test_runner |
| 36 from webkitpy.layout_tests.models.test_run_results import TestRunResults | 35 from webkitpy.layout_tests.models.test_run_results import TestRunResults |
| 37 from webkitpy.layout_tests.models import test_expectations | 36 from webkitpy.layout_tests.models import test_expectations |
| 38 from webkitpy.layout_tests.models import test_failures | 37 from webkitpy.layout_tests.models import test_failures |
| 39 from webkitpy.layout_tests.models import test_results | 38 from webkitpy.layout_tests.models import test_results |
| 40 from webkitpy.tool import grammar | 39 from webkitpy.tool import grammar |
| 41 | 40 |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 201 pass | 200 pass |
| 202 | 201 |
| 203 def _handle_finished_test(self, worker_name, result, log_messages=[]): | 202 def _handle_finished_test(self, worker_name, result, log_messages=[]): |
| 204 self._update_summary_with_result(self._current_run_results, result) | 203 self._update_summary_with_result(self._current_run_results, result) |
| 205 | 204 |
| 206 def _handle_device_failed(self, worker_name, list_name, remaining_tests): | 205 def _handle_device_failed(self, worker_name, list_name, remaining_tests): |
| 207 _log.warning("%s has failed" % worker_name) | 206 _log.warning("%s has failed" % worker_name) |
| 208 if remaining_tests: | 207 if remaining_tests: |
| 209 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) | 208 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) |
| 210 | 209 |
| 210 | |
| 211 class Worker(object): | 211 class Worker(object): |
| 212 def __init__(self, caller, results_directory, options): | 212 def __init__(self, caller, results_directory, options): |
| 213 self._caller = caller | 213 self._caller = caller |
| 214 self._worker_number = caller.worker_number | 214 self._worker_number = caller.worker_number |
| 215 self._name = caller.name | 215 self._name = caller.name |
| 216 self._results_directory = results_directory | 216 self._results_directory = results_directory |
| 217 self._options = options | 217 self._options = options |
| 218 | 218 |
| 219 # The remaining fields are initialized in start() | 219 # The remaining fields are initialized in start() |
| 220 self._host = None | 220 self._host = None |
| 221 self._port = None | 221 self._port = None |
| 222 self._batch_size = None | 222 self._batch_size = None |
| 223 self._batch_count = None | 223 self._batch_count = None |
| 224 self._filesystem = None | 224 self._filesystem = None |
| 225 self._driver = None | 225 self._primary_driver = None |
| 226 self._secondary_driver = None | |
| 226 self._num_tests = 0 | 227 self._num_tests = 0 |
| 227 | 228 |
| 228 def __del__(self): | 229 def __del__(self): |
| 229 self.stop() | 230 self.stop() |
| 230 | 231 |
| 231 def start(self): | 232 def start(self): |
| 232 """This method is called when the object is starting to be used and it is safe | 233 """This method is called when the object is starting to be used and it is safe |
| 233 for the object to create state that does not need to be pickled (usually this means | 234 for the object to create state that does not need to be pickled (usually this means |
| 234 it is called in a child process).""" | 235 it is called in a child process).""" |
| 235 self._host = self._caller.host | 236 self._host = self._caller.host |
| 236 self._filesystem = self._host.filesystem | 237 self._filesystem = self._host.filesystem |
| 237 self._port = self._host.port_factory.get(self._options.platform, self._options) | 238 self._port = self._host.port_factory.get(self._options.platform, self._options) |
| 239 self._primary_driver = self._port.create_driver(self._worker_number) | |
| 240 self._secondary_driver = self._port.create_driver(self._worker_number) | |
| 238 | 241 |
| 239 self._batch_count = 0 | 242 self._batch_count = 0 |
| 240 self._batch_size = self._options.batch_size or 0 | 243 self._batch_size = self._options.batch_size or 0 |
| 241 | 244 |
| 242 def handle(self, name, source, test_list_name, test_inputs): | 245 def handle(self, name, source, test_list_name, test_inputs): |
| 243 assert name == 'test_list' | 246 assert name == 'test_list' |
| 244 for i, test_input in enumerate(test_inputs): | 247 for i, test_input in enumerate(test_inputs): |
| 245 device_failed = self._run_test(test_input, test_list_name) | 248 device_failed = self._run_test(test_input, test_list_name) |
| 246 if device_failed: | 249 if device_failed: |
| 247 self._caller.post('device_failed', test_list_name, test_inputs[i:]) | 250 self._caller.post('device_failed', test_list_name, test_inputs[i:]) |
| 248 self._caller.stop_running() | 251 self._caller.stop_running() |
| 249 return | 252 return |
| 250 | 253 |
| 254 # Kill the secondary driver at the end of each test shard. | |
| 255 self._kill_driver(self._secondary_driver, 'secondary') | |
|
Dirk Pranke
2015/06/03 19:03:40
Do we need to do this? Neither ojan nor I seemed t
joelo
2015/06/04 00:53:26
This was the only thing that gave option #2 (this
| |
| 256 | |
| 251 self._caller.post('finished_test_list', test_list_name) | 257 self._caller.post('finished_test_list', test_list_name) |
| 252 | 258 |
| 253 def _update_test_input(self, test_input): | 259 def _update_test_input(self, test_input): |
| 254 if test_input.reference_files is None: | 260 if test_input.reference_files is None: |
| 255 # Lazy initialization. | 261 # Lazy initialization. |
| 256 test_input.reference_files = self._port.reference_files(test_input.test_name) | 262 test_input.reference_files = self._port.reference_files(test_input.test_name) |
| 257 if test_input.reference_files: | 263 if test_input.reference_files: |
| 258 test_input.should_run_pixel_test = True | 264 test_input.should_run_pixel_test = True |
| 259 else: | 265 else: |
| 260 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input) | 266 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input) |
| 261 | 267 |
| 262 def _run_test(self, test_input, shard_name): | 268 def _run_test(self, test_input, shard_name): |
| 263 self._batch_count += 1 | 269 self._batch_count += 1 |
| 264 | 270 |
| 265 stop_when_done = False | 271 stop_when_done = False |
| 266 if self._batch_size > 0 and self._batch_count >= self._batch_size: | 272 if self._batch_size > 0 and self._batch_count >= self._batch_size: |
| 267 self._batch_count = 0 | 273 self._batch_count = 0 |
| 268 stop_when_done = True | 274 stop_when_done = True |
| 269 | 275 |
| 270 self._update_test_input(test_input) | 276 self._update_test_input(test_input) |
| 271 test_timeout_sec = self._timeout(test_input) | 277 test_timeout_sec = self._timeout(test_input) |
| 272 start = time.time() | 278 start = time.time() |
| 273 device_failed = False | 279 device_failed = False |
| 274 | 280 |
| 275 if self._driver and self._driver.has_crashed(): | |
| 276 self._kill_driver() | |
| 277 if not self._driver: | |
| 278 self._driver = self._port.create_driver(self._worker_number) | |
| 279 | |
| 280 if not self._driver: | |
| 281 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create | |
| 282 # a new failure type? | |
| 283 device_failed = True | |
| 284 return device_failed | |
| 285 | |
| 286 self._caller.post('started_test', test_input, test_timeout_sec) | 281 self._caller.post('started_test', test_input, test_timeout_sec) |
| 287 result = single_test_runner.run_single_test(self._port, self._options, s elf._results_directory, | 282 result = single_test_runner.run_single_test( |
| 288 self._name, self._driver, test_input, stop_when_done) | 283 self._port, self._options, self._results_directory, self._name, |
| 284 self._primary_driver, self._secondary_driver, test_input, | |
| 285 stop_when_done) | |
| 289 | 286 |
| 290 result.shard_name = shard_name | 287 result.shard_name = shard_name |
| 291 result.worker_name = self._name | 288 result.worker_name = self._name |
| 292 result.total_run_time = time.time() - start | 289 result.total_run_time = time.time() - start |
| 293 result.test_number = self._num_tests | 290 result.test_number = self._num_tests |
| 294 self._num_tests += 1 | 291 self._num_tests += 1 |
| 295 self._caller.post('finished_test', result) | 292 self._caller.post('finished_test', result) |
| 296 self._clean_up_after_test(test_input, result) | 293 self._clean_up_after_test(test_input, result) |
| 297 return result.device_failed | 294 return result.device_failed |
| 298 | 295 |
| 299 def stop(self): | 296 def stop(self): |
| 300 _log.debug("%s cleaning up" % self._name) | 297 _log.debug("%s cleaning up" % self._name) |
| 301 self._kill_driver() | 298 self._kill_driver(self._primary_driver, "primary") |
| 299 self._kill_driver(self._secondary_driver, "secondary") | |
| 302 | 300 |
| 303 def _timeout(self, test_input): | 301 def _timeout(self, test_input): |
| 304 """Compute the appropriate timeout value for a test.""" | 302 """Compute the appropriate timeout value for a test.""" |
| 305 # The driver watchdog uses 2.5x the timeout; we want to be | 303 # The driver watchdog uses 2.5x the timeout; we want to be |
| 306 # larger than that. We also add a little more padding if we're | 304 # larger than that. We also add a little more padding if we're |
| 307 # running tests in a separate thread. | 305 # running tests in a separate thread. |
| 308 # | 306 # |
| 309 # Note that we need to convert the test timeout from a | 307 # Note that we need to convert the test timeout from a |
| 310 # string value in milliseconds to a float for Python. | 308 # string value in milliseconds to a float for Python. |
| 311 | 309 |
| 312 # FIXME: Can we just return the test_input.timeout now? | 310 # FIXME: Can we just return the test_input.timeout now? |
| 313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 | 311 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 |
| 314 | 312 |
| 315 def _kill_driver(self): | 313 def _kill_driver(self, driver, label): |
| 316 # Be careful about how and when we kill the driver; if driver.stop() | 314 # Be careful about how and when we kill the driver; if driver.stop() |
| 317 # raises an exception, this routine may get re-entered via __del__. | 315 # raises an exception, this routine may get re-entered via __del__. |
| 318 driver = self._driver | |
| 319 self._driver = None | |
| 320 if driver: | 316 if driver: |
| 321 _log.debug("%s killing driver" % self._name) | 317 _log.debug("%s killing %s driver" % (self._name, label)) |
| 322 driver.stop() | 318 driver.stop() |
| 323 | 319 |
| 324 | |
| 325 def _clean_up_after_test(self, test_input, result): | 320 def _clean_up_after_test(self, test_input, result): |
| 326 test_name = test_input.test_name | 321 test_name = test_input.test_name |
| 327 | 322 |
| 328 if result.failures: | 323 if result.failures: |
| 329 # Check and kill the driver if we need to. | 324 # Check and kill the driver if we need to. |
| 330 if any([f.driver_needs_restart() for f in result.failures]): | 325 if any([f.driver_needs_restart() for f in result.failures]): |
| 331 self._kill_driver() | 326 # FIXME: Need more information in failure reporting so |
| 327 # we know which driver needs to be restarted. For now | |
| 328 # we kill both drivers. | |
| 329 self._kill_driver(self._primary_driver, "primary") | |
| 330 self._kill_driver(self._secondary_driver, "secondary") | |
| 331 | |
| 332 # Reset the batch count since the shell just bounced. | 332 # Reset the batch count since the shell just bounced. |
| 333 self._batch_count = 0 | 333 self._batch_count = 0 |
| 334 | 334 |
| 335 # Print the error message(s). | 335 # Print the error message(s). |
| 336 _log.debug("%s %s failed:" % (self._name, test_name)) | 336 _log.debug("%s %s failed:" % (self._name, test_name)) |
| 337 for f in result.failures: | 337 for f in result.failures: |
| 338 _log.debug("%s %s" % (self._name, f.message())) | 338 _log.debug("%s %s" % (self._name, f.message())) |
| 339 elif result.type == test_expectations.SKIP: | 339 elif result.type == test_expectations.SKIP: |
| 340 _log.debug("%s %s skipped" % (self._name, test_name)) | 340 _log.debug("%s %s skipped" % (self._name, test_name)) |
| 341 else: | 341 else: |
| (...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 497 def split_at(seq, index): | 497 def split_at(seq, index): |
| 498 return (seq[:index], seq[index:]) | 498 return (seq[:index], seq[index:]) |
| 499 | 499 |
| 500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) | 500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) |
| 501 new_shards = [] | 501 new_shards = [] |
| 502 remaining_shards = old_shards | 502 remaining_shards = old_shards |
| 503 while remaining_shards: | 503 while remaining_shards: |
| 504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) | 504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) |
| 505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) | 505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) |
| 506 return new_shards | 506 return new_shards |
| OLD | NEW |