Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright (C) 2011 Google Inc. All rights reserved. | 1 # Copyright (C) 2011 Google Inc. All rights reserved. |
| 2 # | 2 # |
| 3 # Redistribution and use in source and binary forms, with or without | 3 # Redistribution and use in source and binary forms, with or without |
| 4 # modification, are permitted provided that the following conditions are | 4 # modification, are permitted provided that the following conditions are |
| 5 # met: | 5 # met: |
| 6 # | 6 # |
| 7 # * Redistributions of source code must retain the above copyright | 7 # * Redistributions of source code must retain the above copyright |
| 8 # notice, this list of conditions and the following disclaimer. | 8 # notice, this list of conditions and the following disclaimer. |
| 9 # * Redistributions in binary form must reproduce the above | 9 # * Redistributions in binary form must reproduce the above |
| 10 # copyright notice, this list of conditions and the following disclaimer | 10 # copyright notice, this list of conditions and the following disclaimer |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 28 | 28 |
| 29 import logging | 29 import logging |
| 30 import math | 30 import math |
| 31 import threading | |
| 32 import time | 31 import time |
| 33 | 32 |
| 34 from webkitpy.common import message_pool | 33 from webkitpy.common import message_pool |
| 35 from webkitpy.layout_tests.controllers import single_test_runner | 34 from webkitpy.layout_tests.controllers import single_test_runner |
| 36 from webkitpy.layout_tests.models.test_run_results import TestRunResults | 35 from webkitpy.layout_tests.models.test_run_results import TestRunResults |
| 37 from webkitpy.layout_tests.models import test_expectations | 36 from webkitpy.layout_tests.models import test_expectations |
| 38 from webkitpy.layout_tests.models import test_failures | 37 from webkitpy.layout_tests.models import test_failures |
| 39 from webkitpy.layout_tests.models import test_results | 38 from webkitpy.layout_tests.models import test_results |
| 40 from webkitpy.tool import grammar | 39 from webkitpy.tool import grammar |
| 41 | 40 |
| (...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 201 pass | 200 pass |
| 202 | 201 |
| 203 def _handle_finished_test(self, worker_name, result, log_messages=[]): | 202 def _handle_finished_test(self, worker_name, result, log_messages=[]): |
| 204 self._update_summary_with_result(self._current_run_results, result) | 203 self._update_summary_with_result(self._current_run_results, result) |
| 205 | 204 |
| 206 def _handle_device_failed(self, worker_name, list_name, remaining_tests): | 205 def _handle_device_failed(self, worker_name, list_name, remaining_tests): |
| 207 _log.warning("%s has failed" % worker_name) | 206 _log.warning("%s has failed" % worker_name) |
| 208 if remaining_tests: | 207 if remaining_tests: |
| 209 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) | 208 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) |
| 210 | 209 |
| 210 | |
| 211 class Worker(object): | 211 class Worker(object): |
| 212 def __init__(self, caller, results_directory, options): | 212 def __init__(self, caller, results_directory, options): |
| 213 self._caller = caller | 213 self._caller = caller |
| 214 self._worker_number = caller.worker_number | 214 self._worker_number = caller.worker_number |
| 215 self._name = caller.name | 215 self._name = caller.name |
| 216 self._results_directory = results_directory | 216 self._results_directory = results_directory |
| 217 self._options = options | 217 self._options = options |
| 218 | 218 |
| 219 # The remaining fields are initialized in start() | 219 # The remaining fields are initialized in start() |
| 220 self._host = None | 220 self._host = None |
| 221 self._port = None | 221 self._port = None |
| 222 self._batch_size = None | 222 self._batch_size = None |
| 223 self._batch_count = None | 223 self._batch_count = None |
| 224 self._filesystem = None | 224 self._filesystem = None |
| 225 self._driver = None | 225 self._default_driver_handler = None |
| 226 self._virtual_driver_handler = None | |
| 226 self._num_tests = 0 | 227 self._num_tests = 0 |
| 227 | 228 |
| 228 def __del__(self): | 229 def __del__(self): |
| 229 self.stop() | 230 self.stop() |
| 230 | 231 |
| 231 def start(self): | 232 def start(self): |
| 232 """This method is called when the object is starting to be used and it i s safe | 233 """This method is called when the object is starting to be used and it i s safe |
| 233 for the object to create state that does not need to be pickled (usually this means | 234 for the object to create state that does not need to be pickled (usually this means |
| 234 it is called in a child process).""" | 235 it is called in a child process).""" |
| 235 self._host = self._caller.host | 236 self._host = self._caller.host |
| 236 self._filesystem = self._host.filesystem | 237 self._filesystem = self._host.filesystem |
| 237 self._port = self._host.port_factory.get(self._options.platform, self._o ptions) | 238 self._port = self._host.port_factory.get(self._options.platform, self._o ptions) |
| 238 | 239 |
| 239 self._batch_count = 0 | 240 self._batch_count = 0 |
| 240 self._batch_size = self._options.batch_size or 0 | 241 self._batch_size = self._options.batch_size or 0 |
| 241 | 242 |
| 243 self._default_driver_handler = DriverHandler( | |
| 244 'default', self._worker_number, self._name, self._port) | |
| 245 self._virtual_driver_handler = DriverHandler( | |
| 246 'virtual', self._worker_number, self._name, self._port) | |
| 247 | |
| 242 def handle(self, name, source, test_list_name, test_inputs): | 248 def handle(self, name, source, test_list_name, test_inputs): |
| 243 assert name == 'test_list' | 249 assert name == 'test_list' |
| 244 for i, test_input in enumerate(test_inputs): | 250 for i, test_input in enumerate(test_inputs): |
| 245 device_failed = self._run_test(test_input, test_list_name) | 251 device_failed = self._run_test(test_input, test_list_name) |
| 246 if device_failed: | 252 if device_failed: |
| 247 self._caller.post('device_failed', test_list_name, test_inputs[i :]) | 253 self._caller.post('device_failed', test_list_name, test_inputs[i :]) |
| 248 self._caller.stop_running() | 254 self._caller.stop_running() |
| 249 return | 255 return |
| 250 | 256 |
| 251 self._caller.post('finished_test_list', test_list_name) | 257 self._caller.post('finished_test_list', test_list_name) |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 265 stop_when_done = False | 271 stop_when_done = False |
| 266 if self._batch_size > 0 and self._batch_count >= self._batch_size: | 272 if self._batch_size > 0 and self._batch_count >= self._batch_size: |
| 267 self._batch_count = 0 | 273 self._batch_count = 0 |
| 268 stop_when_done = True | 274 stop_when_done = True |
| 269 | 275 |
| 270 self._update_test_input(test_input) | 276 self._update_test_input(test_input) |
| 271 test_timeout_sec = self._timeout(test_input) | 277 test_timeout_sec = self._timeout(test_input) |
| 272 start = time.time() | 278 start = time.time() |
| 273 device_failed = False | 279 device_failed = False |
| 274 | 280 |
| 275 if self._driver and self._driver.has_crashed(): | 281 self._default_driver_handler.prepare() |
| 276 self._kill_driver() | 282 is_virtual = self._port.is_virtual_test(test_input.test_name) |
| 277 if not self._driver: | 283 if is_virtual: |
| 278 self._driver = self._port.create_driver(self._worker_number) | 284 # Lazily initialize or restart virtual driver. |
| 285 self._virtual_driver_handler.prepare() | |
| 279 | 286 |
| 280 if not self._driver: | 287 if (not self._default_driver_handler.is_prepared() or |
| 288 (is_virtual and not self._virtual_driver_handler.is_prepared())) : | |
| 281 # FIXME: Is this the best way to handle a device crashing in the mid dle of the test, or should we create | 289 # FIXME: Is this the best way to handle a device crashing in the mid dle of the test, or should we create |
| 282 # a new failure type? | 290 # a new failure type? |
| 283 device_failed = True | 291 device_failed = True |
| 284 return device_failed | 292 return device_failed |
| 285 | 293 |
| 286 self._caller.post('started_test', test_input, test_timeout_sec) | 294 self._caller.post('started_test', test_input, test_timeout_sec) |
| 287 result = single_test_runner.run_single_test(self._port, self._options, s elf._results_directory, | 295 result = single_test_runner.run_single_test( |
| 288 self._name, self._driver, test_input, stop_when_done) | 296 self._port, self._options, self._results_directory, self._name, |
| 297 self._default_driver_handler.driver, | |
| 298 self._virtual_driver_handler.driver, test_input, stop_when_done) | |
| 289 | 299 |
| 290 result.shard_name = shard_name | 300 result.shard_name = shard_name |
| 291 result.worker_name = self._name | 301 result.worker_name = self._name |
| 292 result.total_run_time = time.time() - start | 302 result.total_run_time = time.time() - start |
| 293 result.test_number = self._num_tests | 303 result.test_number = self._num_tests |
| 294 self._num_tests += 1 | 304 self._num_tests += 1 |
| 295 self._caller.post('finished_test', result) | 305 self._caller.post('finished_test', result) |
| 296 self._clean_up_after_test(test_input, result) | 306 self._clean_up_after_test(test_input, result) |
| 297 return result.device_failed | 307 return result.device_failed |
| 298 | 308 |
| 299 def stop(self): | 309 def stop(self): |
| 300 _log.debug("%s cleaning up" % self._name) | 310 _log.debug("%s cleaning up" % self._name) |
| 301 self._kill_driver() | 311 if self._default_driver_handler: |
| 312 self._default_driver_handler.kill() | |
| 313 if self._virtual_driver_handler: | |
| 314 self._virtual_driver_handler.kill() | |
| 302 | 315 |
| 303 def _timeout(self, test_input): | 316 def _timeout(self, test_input): |
| 304 """Compute the appropriate timeout value for a test.""" | 317 """Compute the appropriate timeout value for a test.""" |
| 305 # The driver watchdog uses 2.5x the timeout; we want to be | 318 # The driver watchdog uses 2.5x the timeout; we want to be |
| 306 # larger than that. We also add a little more padding if we're | 319 # larger than that. We also add a little more padding if we're |
| 307 # running tests in a separate thread. | 320 # running tests in a separate thread. |
| 308 # | 321 # |
| 309 # Note that we need to convert the test timeout from a | 322 # Note that we need to convert the test timeout from a |
| 310 # string value in milliseconds to a float for Python. | 323 # string value in milliseconds to a float for Python. |
| 311 | 324 |
| 312 # FIXME: Can we just return the test_input.timeout now? | 325 # FIXME: Can we just return the test_input.timeout now? |
| 313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 | 326 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 |
| 314 | 327 |
| 315 def _kill_driver(self): | |
| 316 # Be careful about how and when we kill the driver; if driver.stop() | |
| 317 # raises an exception, this routine may get re-entered via __del__. | |
| 318 driver = self._driver | |
| 319 self._driver = None | |
| 320 if driver: | |
| 321 _log.debug("%s killing driver" % self._name) | |
| 322 driver.stop() | |
| 323 | |
| 324 | |
| 325 def _clean_up_after_test(self, test_input, result): | 328 def _clean_up_after_test(self, test_input, result): |
| 326 test_name = test_input.test_name | 329 test_name = test_input.test_name |
| 327 | 330 |
| 328 if result.failures: | 331 if result.failures: |
| 329 # Check and kill the driver if we need to. | 332 # Check and kill the driver if we need to. |
| 330 if any([f.driver_needs_restart() for f in result.failures]): | 333 if any([f.driver_needs_restart() for f in result.failures]): |
| 331 self._kill_driver() | 334 self._default_driver_handler.kill() |
| 335 if self._port.is_virtual_test(test_input.test_name): | |
| 336 # FIXME: Need more information in failure reporting so | |
| 337 # we know which driver needs to be restarted for virtual | |
| 338 # tests. For now we restart both drivers. | |
| 339 self._virtual_driver_handler.kill() | |
| 340 | |
| 332 # Reset the batch count since the shell just bounced. | 341 # Reset the batch count since the shell just bounced. |
| 333 self._batch_count = 0 | 342 self._batch_count = 0 |
| 334 | 343 |
| 335 # Print the error message(s). | 344 # Print the error message(s). |
| 336 _log.debug("%s %s failed:" % (self._name, test_name)) | 345 _log.debug("%s %s failed:" % (self._name, test_name)) |
| 337 for f in result.failures: | 346 for f in result.failures: |
| 338 _log.debug("%s %s" % (self._name, f.message())) | 347 _log.debug("%s %s" % (self._name, f.message())) |
| 339 elif result.type == test_expectations.SKIP: | 348 elif result.type == test_expectations.SKIP: |
| 340 _log.debug("%s %s skipped" % (self._name, test_name)) | 349 _log.debug("%s %s skipped" % (self._name, test_name)) |
| 341 else: | 350 else: |
| 342 _log.debug("%s %s passed" % (self._name, test_name)) | 351 _log.debug("%s %s passed" % (self._name, test_name)) |
| 343 | 352 |
| 344 | 353 |
class DriverHandler(object):
    """Manages the lifecycle (lazy creation, restart, shutdown) of one driver.

    A worker owns one handler per driver flavor (e.g. 'default' and
    'virtual'). The underlying driver is created lazily by prepare() and
    recreated if it has crashed.

    Attributes:
        driver: The current Driver object; None until prepare() succeeds
            or after kill().
        label: Label for this driver, used for logging purposes.
    """

    def __init__(self, label, worker_number, worker_name, port):
        self.driver = None
        self.label = label
        self._worker_number = worker_number
        self._worker_name = worker_name
        self._port = port

    def _create(self):
        # Create a driver if one doesn't exist.
        if not self.driver:
            self.driver = self._port.create_driver(self._worker_number)

    def is_prepared(self):
        """Returns true if the driver is initialized and in a good state."""
        return self.driver and not self.driver.has_crashed()

    def prepare(self):
        """Prepares the driver for testing.

        Makes sure the driver is properly initialized and in a good state,
        restarting it if it has crashed.
        """
        if self.driver and self.driver.has_crashed():
            self.kill()
        # Delegate lazy creation to _create() instead of duplicating it here.
        self._create()

    def kill(self):
        """Shuts down the driver."""
        # Be careful about how and when we kill the driver; if driver.stop()
        # raises an exception, this routine may get re-entered via __del__.
        if self.driver:
            driver = self.driver
            self.driver = None
            _log.debug(
                "%s killing driver: %s" % (self._worker_name, self.label))
            driver.stop()
| 400 | |
class TestShard(object):
    """A test shard is a named list of TestInputs."""

    def __init__(self, name, test_inputs):
        self.name = name
        self.test_inputs = test_inputs
        # The shard inherits the lock requirement from its first input;
        # all inputs in a shard are expected to share this property.
        self.requires_lock = test_inputs[0].requires_lock

    def __repr__(self):
        # Fix: the format string previously ended with a stray quote
        # (`requires_lock=%s'`) instead of a closing parenthesis.
        return "TestShard(name='%s', test_inputs=%s, requires_lock=%s)" % (
            self.name, self.test_inputs, self.requires_lock)
| (...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 497 def split_at(seq, index): | 553 def split_at(seq, index): |
| 498 return (seq[:index], seq[index:]) | 554 return (seq[:index], seq[index:]) |
| 499 | 555 |
| 500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) | 556 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) |
| 501 new_shards = [] | 557 new_shards = [] |
| 502 remaining_shards = old_shards | 558 remaining_shards = old_shards |
| 503 while remaining_shards: | 559 while remaining_shards: |
| 504 some_shards, remaining_shards = split_at(remaining_shards, num_old_p er_new) | 560 some_shards, remaining_shards = split_at(remaining_shards, num_old_p er_new) |
| 505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_sh ards) + 1), extract_and_flatten(some_shards))) | 561 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_sh ards) + 1), extract_and_flatten(some_shards))) |
| 506 return new_shards | 562 return new_shards |
| OLD | NEW |