Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(668)

Side by Side Diff: Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py

Issue 1154343007: Add extra content_shell per worker for virtual tests, except on Android. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: remove stray code Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright (C) 2011 Google Inc. All rights reserved. 1 # Copyright (C) 2011 Google Inc. All rights reserved.
2 # 2 #
3 # Redistribution and use in source and binary forms, with or without 3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are 4 # modification, are permitted provided that the following conditions are
5 # met: 5 # met:
6 # 6 #
7 # * Redistributions of source code must retain the above copyright 7 # * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer. 8 # notice, this list of conditions and the following disclaimer.
9 # * Redistributions in binary form must reproduce the above 9 # * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer 10 # copyright notice, this list of conditions and the following disclaimer
(...skipping 10 matching lines...) Expand all
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 28
29 import logging 29 import logging
30 import math 30 import math
31 import threading
32 import time 31 import time
33 32
34 from webkitpy.common import message_pool 33 from webkitpy.common import message_pool
35 from webkitpy.layout_tests.controllers import single_test_runner 34 from webkitpy.layout_tests.controllers import single_test_runner
36 from webkitpy.layout_tests.models.test_run_results import TestRunResults 35 from webkitpy.layout_tests.models.test_run_results import TestRunResults
37 from webkitpy.layout_tests.models import test_expectations 36 from webkitpy.layout_tests.models import test_expectations
38 from webkitpy.layout_tests.models import test_failures 37 from webkitpy.layout_tests.models import test_failures
39 from webkitpy.layout_tests.models import test_results 38 from webkitpy.layout_tests.models import test_results
40 from webkitpy.tool import grammar 39 from webkitpy.tool import grammar
41 40
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after
201 pass 200 pass
202 201
203 def _handle_finished_test(self, worker_name, result, log_messages=[]): 202 def _handle_finished_test(self, worker_name, result, log_messages=[]):
204 self._update_summary_with_result(self._current_run_results, result) 203 self._update_summary_with_result(self._current_run_results, result)
205 204
206 def _handle_device_failed(self, worker_name, list_name, remaining_tests): 205 def _handle_device_failed(self, worker_name, list_name, remaining_tests):
207 _log.warning("%s has failed" % worker_name) 206 _log.warning("%s has failed" % worker_name)
208 if remaining_tests: 207 if remaining_tests:
209 self._shards_to_redo.append(TestShard(list_name, remaining_tests)) 208 self._shards_to_redo.append(TestShard(list_name, remaining_tests))
210 209
210
211 class Worker(object): 211 class Worker(object):
212 def __init__(self, caller, results_directory, options): 212 def __init__(self, caller, results_directory, options):
213 self._caller = caller 213 self._caller = caller
214 self._worker_number = caller.worker_number 214 self._worker_number = caller.worker_number
215 self._name = caller.name 215 self._name = caller.name
216 self._results_directory = results_directory 216 self._results_directory = results_directory
217 self._options = options 217 self._options = options
218 218
219 # The remaining fields are initialized in start() 219 # The remaining fields are initialized in start()
220 self._host = None 220 self._host = None
221 self._port = None 221 self._port = None
222 self._batch_size = None 222 self._batch_size = None
223 self._batch_count = None 223 self._batch_count = None
224 self._filesystem = None 224 self._filesystem = None
225 self._driver = None 225 self._primary_driver = None
226 self._secondary_driver = None
226 self._num_tests = 0 227 self._num_tests = 0
227 228
228 def __del__(self): 229 def __del__(self):
229 self.stop() 230 self.stop()
230 231
231 def start(self): 232 def start(self):
232 """This method is called when the object is starting to be used and it is safe 233 """This method is called when the object is starting to be used and it is safe
233 for the object to create state that does not need to be pickled (usually this means 234 for the object to create state that does not need to be pickled (usually this means
234 it is called in a child process).""" 235 it is called in a child process)."""
235 self._host = self._caller.host 236 self._host = self._caller.host
236 self._filesystem = self._host.filesystem 237 self._filesystem = self._host.filesystem
237 self._port = self._host.port_factory.get(self._options.platform, self._options) 238 self._port = self._host.port_factory.get(self._options.platform, self._options)
239 self._primary_driver = self._port.create_driver(self._worker_number)
240
241 if self._port.default_max_drivers_per_process > 1:
Dirk Pranke 2015/06/08 17:02:34 default_max_drivers_per_process is a method, not a
joelo 2015/06/08 20:34:08 Whoops, good catch.
242 self._secondary_driver = self._port.create_driver(self._worker_number)
238 243
239 self._batch_count = 0 244 self._batch_count = 0
240 self._batch_size = self._options.batch_size or 0 245 self._batch_size = self._options.batch_size or 0
241 246
242 def handle(self, name, source, test_list_name, test_inputs): 247 def handle(self, name, source, test_list_name, test_inputs):
243 assert name == 'test_list' 248 assert name == 'test_list'
244 for i, test_input in enumerate(test_inputs): 249 for i, test_input in enumerate(test_inputs):
245 device_failed = self._run_test(test_input, test_list_name) 250 device_failed = self._run_test(test_input, test_list_name)
246 if device_failed: 251 if device_failed:
247 self._caller.post('device_failed', test_list_name, test_inputs[i:]) 252 self._caller.post('device_failed', test_list_name, test_inputs[i:])
248 self._caller.stop_running() 253 self._caller.stop_running()
249 return 254 return
250 255
256 # Kill the secondary driver at the end of each test shard.
257 self._kill_driver(self._secondary_driver, 'secondary')
258
251 self._caller.post('finished_test_list', test_list_name) 259 self._caller.post('finished_test_list', test_list_name)
252 260
253 def _update_test_input(self, test_input): 261 def _update_test_input(self, test_input):
254 if test_input.reference_files is None: 262 if test_input.reference_files is None:
255 # Lazy initialization. 263 # Lazy initialization.
256 test_input.reference_files = self._port.reference_files(test_input.test_name) 264 test_input.reference_files = self._port.reference_files(test_input.test_name)
257 if test_input.reference_files: 265 if test_input.reference_files:
258 test_input.should_run_pixel_test = True 266 test_input.should_run_pixel_test = True
259 else: 267 else:
260 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input) 268 test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
261 269
262 def _run_test(self, test_input, shard_name): 270 def _run_test(self, test_input, shard_name):
263 self._batch_count += 1 271 self._batch_count += 1
264 272
265 stop_when_done = False 273 stop_when_done = False
266 if self._batch_size > 0 and self._batch_count >= self._batch_size: 274 if self._batch_size > 0 and self._batch_count >= self._batch_size:
267 self._batch_count = 0 275 self._batch_count = 0
268 stop_when_done = True 276 stop_when_done = True
269 277
270 self._update_test_input(test_input) 278 self._update_test_input(test_input)
271 test_timeout_sec = self._timeout(test_input) 279 test_timeout_sec = self._timeout(test_input)
272 start = time.time() 280 start = time.time()
273 device_failed = False 281 device_failed = False
274 282
275 if self._driver and self._driver.has_crashed():
276 self._kill_driver()
277 if not self._driver:
278 self._driver = self._port.create_driver(self._worker_number)
279
280 if not self._driver:
281 # FIXME: Is this the best way to handle a device crashing in the middle of the test, or should we create
282 # a new failure type?
283 device_failed = True
284 return device_failed
285
286 self._caller.post('started_test', test_input, test_timeout_sec) 283 self._caller.post('started_test', test_input, test_timeout_sec)
287 result = single_test_runner.run_single_test(self._port, self._options, self._results_directory, 284 result = single_test_runner.run_single_test(
288 self._name, self._driver, test_input, stop_when_done) 285 self._port, self._options, self._results_directory, self._name,
286 self._primary_driver, self._secondary_driver, test_input,
287 stop_when_done)
289 288
290 result.shard_name = shard_name 289 result.shard_name = shard_name
291 result.worker_name = self._name 290 result.worker_name = self._name
292 result.total_run_time = time.time() - start 291 result.total_run_time = time.time() - start
293 result.test_number = self._num_tests 292 result.test_number = self._num_tests
294 self._num_tests += 1 293 self._num_tests += 1
295 self._caller.post('finished_test', result) 294 self._caller.post('finished_test', result)
296 self._clean_up_after_test(test_input, result) 295 self._clean_up_after_test(test_input, result)
297 return result.device_failed 296 return result.device_failed
298 297
299 def stop(self): 298 def stop(self):
300 _log.debug("%s cleaning up" % self._name) 299 _log.debug("%s cleaning up" % self._name)
301 self._kill_driver() 300 self._kill_driver(self._primary_driver, "primary")
301 self._kill_driver(self._secondary_driver, "secondary")
302 302
303 def _timeout(self, test_input): 303 def _timeout(self, test_input):
304 """Compute the appropriate timeout value for a test.""" 304 """Compute the appropriate timeout value for a test."""
305 # The driver watchdog uses 2.5x the timeout; we want to be 305 # The driver watchdog uses 2.5x the timeout; we want to be
306 # larger than that. We also add a little more padding if we're 306 # larger than that. We also add a little more padding if we're
307 # running tests in a separate thread. 307 # running tests in a separate thread.
308 # 308 #
309 # Note that we need to convert the test timeout from a 309 # Note that we need to convert the test timeout from a
310 # string value in milliseconds to a float for Python. 310 # string value in milliseconds to a float for Python.
311 311
312 # FIXME: Can we just return the test_input.timeout now? 312 # FIXME: Can we just return the test_input.timeout now?
313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0 313 driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
314 314
315 def _kill_driver(self): 315 def _kill_driver(self, driver, label):
316 # Be careful about how and when we kill the driver; if driver.stop() 316 # Be careful about how and when we kill the driver; if driver.stop()
317 # raises an exception, this routine may get re-entered via __del__. 317 # raises an exception, this routine may get re-entered via __del__.
318 driver = self._driver
319 self._driver = None
320 if driver: 318 if driver:
321 _log.debug("%s killing driver" % self._name) 319 _log.debug("%s killing %s driver" % (self._name, label))
322 driver.stop() 320 driver.stop()
323 321
324
325 def _clean_up_after_test(self, test_input, result): 322 def _clean_up_after_test(self, test_input, result):
326 test_name = test_input.test_name 323 test_name = test_input.test_name
327 324
328 if result.failures: 325 if result.failures:
329 # Check and kill the driver if we need to. 326 # Check and kill the driver if we need to.
330 if any([f.driver_needs_restart() for f in result.failures]): 327 if any([f.driver_needs_restart() for f in result.failures]):
331 self._kill_driver() 328 # FIXME: Need more information in failure reporting so
329 # we know which driver needs to be restarted. For now
330 # we kill both drivers.
331 self._kill_driver(self._primary_driver, "primary")
332 self._kill_driver(self._secondary_driver, "secondary")
333
332 # Reset the batch count since the shell just bounced. 334 # Reset the batch count since the shell just bounced.
333 self._batch_count = 0 335 self._batch_count = 0
334 336
335 # Print the error message(s). 337 # Print the error message(s).
336 _log.debug("%s %s failed:" % (self._name, test_name)) 338 _log.debug("%s %s failed:" % (self._name, test_name))
337 for f in result.failures: 339 for f in result.failures:
338 _log.debug("%s %s" % (self._name, f.message())) 340 _log.debug("%s %s" % (self._name, f.message()))
339 elif result.type == test_expectations.SKIP: 341 elif result.type == test_expectations.SKIP:
340 _log.debug("%s %s skipped" % (self._name, test_name)) 342 _log.debug("%s %s skipped" % (self._name, test_name))
341 else: 343 else:
(...skipping 155 matching lines...) Expand 10 before | Expand all | Expand 10 after
497 def split_at(seq, index): 499 def split_at(seq, index):
498 return (seq[:index], seq[index:]) 500 return (seq[:index], seq[index:])
499 501
500 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards) 502 num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
501 new_shards = [] 503 new_shards = []
502 remaining_shards = old_shards 504 remaining_shards = old_shards
503 while remaining_shards: 505 while remaining_shards:
504 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new) 506 some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
505 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards))) 507 new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
506 return new_shards 508 return new_shards
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698